xref: /qemu/accel/tcg/plugin-gen.c (revision 0bcebaba)
1 /*
2  * plugin-gen.c - TCG-related bits of plugin infrastructure
3  *
4  * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
5  * License: GNU GPL, version 2 or later.
6  *   See the COPYING file in the top-level directory.
7  *
8  * We support instrumentation at an instruction granularity. That is,
9  * if a plugin wants to instrument the memory accesses performed by a
10  * particular instruction, it can just do that instead of instrumenting
11  * all memory accesses. To make this possible we first have to translate
12  * a TB, so that plugins can decide what/where to instrument.
13  *
14  * Injecting the desired instrumentation could be done with a second
15  * translation pass that combined the instrumentation requests, but that
16  * would be ugly and inefficient since we would decode the guest code twice.
17  * Instead, during TB translation we add "empty" instrumentation calls for all
18  * possible instrumentation events, and then once we collect the instrumentation
19  * requests from plugins, we either "fill in" those empty events or remove them
20  * if they have no requests.
21  *
22  * When "filling in" an event we first copy the empty callback's TCG ops. This
23  * might seem unnecessary, but it is done to support an arbitrary number
24  * of callbacks per event. Take for example a regular instruction callback.
25  * We first generate a callback to an empty helper function. Then, if two
26  * plugins register one callback each for this instruction, we make two copies
27  * of the TCG ops generated for the empty callback, substituting the function
28  * pointer that points to the empty helper function with the plugins' desired
29  * callback functions. After that we remove the empty callback's ops.
30  *
31  * Note that the location in TCGOp.args[] of the pointer to a helper function
32  * varies across different guest and host architectures. Instead of duplicating
33  * the logic that figures this out, we rely on the fact that the empty
34  * callbacks point to empty functions that are unique pointers in the program.
35  * Thus, to find the right location we just have to look for a match in
36  * TCGOp.args[]. This is the main reason why we first copy an empty callback's
37  * TCG ops and then fill them in; regardless of whether we have one or many
38  * callbacks for that event, the logic to add all of them is the same.
39  *
40  * When generating more than one callback per event, we make a small
41  * optimization to avoid generating redundant operations. For instance, for the
42  * second and all subsequent callbacks of an event, we do not need to reload the
43  * CPU's index into a TCG temp, since the first callback did it already.
44  */
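/*
 * As a rough illustration (the exact opcodes differ), the op stream for one
 * instruction right after translation contains placeholder regions such as:
 *
 *     plugin_cb_start FROM_INSN, CB_UDATA
 *         ld_i32 cpu_index
 *         call empty_helper(cpu_index, NULL)
 *     plugin_cb_end
 *
 * plugin_gen_inject() later either removes such a region (no callbacks were
 * registered, e.g. via qemu_plugin_register_vcpu_insn_exec_cb()) or replaces
 * it with one filled-in copy of the region per registered callback.
 */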
45 #include "qemu/osdep.h"
46 #include "qemu/plugin.h"
47 #include "cpu.h"
48 #include "tcg/tcg.h"
49 #include "tcg/tcg-temp-internal.h"
50 #include "tcg/tcg-op.h"
51 #include "exec/exec-all.h"
52 #include "exec/plugin-gen.h"
53 #include "exec/translator.h"
54 #include "exec/helper-proto-common.h"
55 
56 #define HELPER_H  "accel/tcg/plugin-helpers.h"
57 #include "exec/helper-info.c.inc"
58 #undef  HELPER_H
59 
60 #ifdef CONFIG_SOFTMMU
61 # define CONFIG_SOFTMMU_GATE 1
62 #else
63 # define CONFIG_SOFTMMU_GATE 0
64 #endif
65 
66 /*
67  * plugin_cb_start TCG op args[]:
68  * 0: enum plugin_gen_from
69  * 1: enum plugin_gen_cb
70  * 2: set to 1 for mem callback that is a write, 0 otherwise.
71  */
72 
73 enum plugin_gen_from {
74     PLUGIN_GEN_FROM_TB,
75     PLUGIN_GEN_FROM_INSN,
76     PLUGIN_GEN_FROM_MEM,
77     PLUGIN_GEN_AFTER_INSN,
78     PLUGIN_GEN_N_FROMS,
79 };
80 
81 enum plugin_gen_cb {
82     PLUGIN_GEN_CB_UDATA,
83     PLUGIN_GEN_CB_UDATA_R,
84     PLUGIN_GEN_CB_INLINE,
85     PLUGIN_GEN_CB_MEM,
86     PLUGIN_GEN_ENABLE_MEM_HELPER,
87     PLUGIN_GEN_DISABLE_MEM_HELPER,
88     PLUGIN_GEN_N_CBS,
89 };
90 
91 /*
92  * These helpers are stubs that get dynamically switched out for direct
93  * calls to the plugin once a plugin has subscribed to the event.
94  */
95 void HELPER(plugin_vcpu_udata_cb_no_wg)(uint32_t cpu_index, void *udata)
96 { }
97 
98 void HELPER(plugin_vcpu_udata_cb_no_rwg)(uint32_t cpu_index, void *udata)
99 { }
100 
101 void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
102                                 qemu_plugin_meminfo_t info, uint64_t vaddr,
103                                 void *userdata)
104 { }
105 
106 static void gen_empty_udata_cb(void (*gen_helper)(TCGv_i32, TCGv_ptr))
107 {
108     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
109     TCGv_ptr udata = tcg_temp_ebb_new_ptr();
110 
111     tcg_gen_movi_ptr(udata, 0);
112     tcg_gen_ld_i32(cpu_index, tcg_env,
113                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
114     gen_helper(cpu_index, udata);
115 
116     tcg_temp_free_ptr(udata);
117     tcg_temp_free_i32(cpu_index);
118 }
119 
120 static void gen_empty_udata_cb_no_wg(void)
121 {
122     gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_wg);
123 }
124 
125 static void gen_empty_udata_cb_no_rwg(void)
126 {
127     gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_rwg);
128 }
129 
130 /*
131  * For now we only support addi_i64.
132  * When we support more ops, we can generate one empty inline cb for each.
133  */
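/*
 * In effect, once the placeholders below are filled in by
 * append_inline_cb(), the generated sequence computes:
 *
 *     *(uint64_t *)(ptr + offset + cpu_index * elem_size) += imm
 */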
134 static void gen_empty_inline_cb(void)
135 {
136     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
137     TCGv_ptr cpu_index_as_ptr = tcg_temp_ebb_new_ptr();
138     TCGv_i64 val = tcg_temp_ebb_new_i64();
139     TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
140 
141     tcg_gen_ld_i32(cpu_index, tcg_env,
142                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
143     /* second operand will be replaced by immediate value */
144     tcg_gen_mul_i32(cpu_index, cpu_index, cpu_index);
145     tcg_gen_ext_i32_ptr(cpu_index_as_ptr, cpu_index);
146 
147     tcg_gen_movi_ptr(ptr, 0);
148     tcg_gen_add_ptr(ptr, ptr, cpu_index_as_ptr);
149     tcg_gen_ld_i64(val, ptr, 0);
150     /* second operand will be replaced by immediate value */
151     tcg_gen_add_i64(val, val, val);
152 
153     tcg_gen_st_i64(val, ptr, 0);
154     tcg_temp_free_ptr(ptr);
155     tcg_temp_free_i64(val);
156     tcg_temp_free_ptr(cpu_index_as_ptr);
157     tcg_temp_free_i32(cpu_index);
158 }
159 
160 static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
161 {
162     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
163     TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
164     TCGv_ptr udata = tcg_temp_ebb_new_ptr();
165 
166     tcg_gen_movi_i32(meminfo, info);
167     tcg_gen_movi_ptr(udata, 0);
168     tcg_gen_ld_i32(cpu_index, tcg_env,
169                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
170 
171     gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
172 
173     tcg_temp_free_ptr(udata);
174     tcg_temp_free_i32(meminfo);
175     tcg_temp_free_i32(cpu_index);
176 }
177 
178 /*
179  * Share the same function for enable/disable. When enabling, the NULL
180  * pointer will be overwritten later.
181  */
182 static void gen_empty_mem_helper(void)
183 {
184     TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
185 
186     tcg_gen_movi_ptr(ptr, 0);
187     tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) -
188                                  offsetof(ArchCPU, env));
189     tcg_temp_free_ptr(ptr);
190 }
191 
192 static void gen_plugin_cb_start(enum plugin_gen_from from,
193                                 enum plugin_gen_cb type, unsigned wr)
194 {
195     tcg_gen_plugin_cb_start(from, type, wr);
196 }
197 
198 static void gen_wrapped(enum plugin_gen_from from,
199                         enum plugin_gen_cb type, void (*func)(void))
200 {
201     gen_plugin_cb_start(from, type, 0);
202     func();
203     tcg_gen_plugin_cb_end();
204 }
205 
206 static void plugin_gen_empty_callback(enum plugin_gen_from from)
207 {
208     switch (from) {
209     case PLUGIN_GEN_AFTER_INSN:
210         gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
211                     gen_empty_mem_helper);
212         break;
213     case PLUGIN_GEN_FROM_INSN:
214         /*
215          * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
216          * the first callback of an instruction
217          */
218         gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
219                     gen_empty_mem_helper);
220         /* fall through */
221     case PLUGIN_GEN_FROM_TB:
222         gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb_no_rwg);
223         gen_wrapped(from, PLUGIN_GEN_CB_UDATA_R, gen_empty_udata_cb_no_wg);
224         gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
225         break;
226     default:
227         g_assert_not_reached();
228     }
229 }
230 
231 void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
232 {
233     enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
234 
235     gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
236     gen_empty_mem_cb(addr, info);
237     tcg_gen_plugin_cb_end();
238 
239     gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
240     gen_empty_inline_cb();
241     tcg_gen_plugin_cb_end();
242 }
243 
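/* Return the first op at or after @op whose opcode is @opc, or NULL. */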
244 static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
245 {
246     while (op) {
247         if (op->opc == opc) {
248             return op;
249         }
250         op = QTAILQ_NEXT(op, link);
251     }
252     return NULL;
253 }
254 
255 static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
256 {
257     TCGOp *ret = QTAILQ_NEXT(end, link);
258 
259     QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
260     return ret;
261 }
262 
263 /* remove all ops until (and including) plugin_cb_end */
264 static TCGOp *rm_ops(TCGOp *op)
265 {
266     TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);
267 
268     tcg_debug_assert(end_op);
269     return rm_ops_range(op, end_op);
270 }
271 
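/*
 * Advance *begin_op to the next op of the "empty" template and append a
 * copy of that op after @op in the output stream; the copy is returned.
 * copy_op() additionally asserts that the template op has the expected
 * opcode, so mismatches between the empty and filled-in sequences are
 * caught early.
 */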
272 static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
273 {
274     TCGOp *old_op = QTAILQ_NEXT(*begin_op, link);
275     unsigned nargs = old_op->nargs;
276 
277     *begin_op = old_op;
278     op = tcg_op_insert_after(tcg_ctx, op, old_op->opc, nargs);
279     memcpy(op->args, old_op->args, sizeof(op->args[0]) * nargs);
280 
281     return op;
282 }
283 
284 static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
285 {
286     op = copy_op_nocheck(begin_op, op);
287     tcg_debug_assert((*begin_op)->opc == opc);
288     return op;
289 }
290 
291 static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
292 {
293     if (UINTPTR_MAX == UINT32_MAX) {
294         /* mov_i32 */
295         op = copy_op(begin_op, op, INDEX_op_mov_i32);
296         op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
297     } else {
298         /* mov_i64 */
299         op = copy_op(begin_op, op, INDEX_op_mov_i64);
300         op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
301     }
302     return op;
303 }
304 
305 static TCGOp *copy_ld_i32(TCGOp **begin_op, TCGOp *op)
306 {
307     return copy_op(begin_op, op, INDEX_op_ld_i32);
308 }
309 
310 static TCGOp *copy_ext_i32_ptr(TCGOp **begin_op, TCGOp *op)
311 {
312     if (UINTPTR_MAX == UINT32_MAX) {
313         op = copy_op(begin_op, op, INDEX_op_mov_i32);
314     } else {
315         op = copy_op(begin_op, op, INDEX_op_ext_i32_i64);
316     }
317     return op;
318 }
319 
320 static TCGOp *copy_add_ptr(TCGOp **begin_op, TCGOp *op)
321 {
322     if (UINTPTR_MAX == UINT32_MAX) {
323         op = copy_op(begin_op, op, INDEX_op_add_i32);
324     } else {
325         op = copy_op(begin_op, op, INDEX_op_add_i64);
326     }
327     return op;
328 }
329 
330 static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
331 {
332     if (TCG_TARGET_REG_BITS == 32) {
333         /* 2x ld_i32 */
334         op = copy_ld_i32(begin_op, op);
335         op = copy_ld_i32(begin_op, op);
336     } else {
337         /* ld_i64 */
338         op = copy_op(begin_op, op, INDEX_op_ld_i64);
339     }
340     return op;
341 }
342 
343 static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
344 {
345     if (TCG_TARGET_REG_BITS == 32) {
346         /* 2x st_i32 */
347         op = copy_op(begin_op, op, INDEX_op_st_i32);
348         op = copy_op(begin_op, op, INDEX_op_st_i32);
349     } else {
350         /* st_i64 */
351         op = copy_op(begin_op, op, INDEX_op_st_i64);
352     }
353     return op;
354 }
355 
356 static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
357 {
358     if (TCG_TARGET_REG_BITS == 32) {
359         /* all 32-bit backends must implement add2_i32 */
360         g_assert(TCG_TARGET_HAS_add2_i32);
361         op = copy_op(begin_op, op, INDEX_op_add2_i32);
362         op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
363         op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
364     } else {
365         op = copy_op(begin_op, op, INDEX_op_add_i64);
366         op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
367     }
368     return op;
369 }
370 
371 static TCGOp *copy_mul_i32(TCGOp **begin_op, TCGOp *op, uint32_t v)
372 {
373     op = copy_op(begin_op, op, INDEX_op_mul_i32);
374     op->args[2] = tcgv_i32_arg(tcg_constant_i32(v));
375     return op;
376 }
377 
378 static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
379 {
380     if (UINTPTR_MAX == UINT32_MAX) {
381         /* st_i32 */
382         op = copy_op(begin_op, op, INDEX_op_st_i32);
383     } else {
384         /* st_i64 */
385         op = copy_st_i64(begin_op, op);
386     }
387     return op;
388 }
389 
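/*
 * Copy template ops up to and including the call, then point the copied
 * call at @func.  In a call op the helper pointer sits right after the
 * output and input arguments, i.e. at args[CALLO + CALLI]; the index is
 * stored in *cb_idx, which callers also use to detect that the first
 * callback of an event has already been emitted.
 */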
390 static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx)
391 {
392     TCGOp *old_op;
393     int func_idx;
394 
395     /* copy all ops up to and including the call */
396     do {
397         op = copy_op_nocheck(begin_op, op);
398     } while (op->opc != INDEX_op_call);
399 
400     /* fill in the op call */
401     old_op = *begin_op;
402     TCGOP_CALLI(op) = TCGOP_CALLI(old_op);
403     TCGOP_CALLO(op) = TCGOP_CALLO(old_op);
404     tcg_debug_assert(op->life == 0);
405 
406     func_idx = TCGOP_CALLO(op) + TCGOP_CALLI(op);
407     *cb_idx = func_idx;
408     op->args[func_idx] = (uintptr_t)func;
409 
410     return op;
411 }
412 
413 /*
414  * When we append/replace ops here we are sensitive to changing patterns of
415  * TCGOps generated by the tcg_gen_FOO calls when we generated the
416  * empty callbacks. This will assert very quickly in a debug build as
417  * we assert the ops we are replacing are the correct ones.
418  */
419 static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
420                               TCGOp *begin_op, TCGOp *op, int *cb_idx)
421 {
422     /* const_ptr */
423     op = copy_const_ptr(&begin_op, op, cb->userp);
424 
425     /* copy the ld_i32, but note that we only have to copy it once */
426     if (*cb_idx == -1) {
427         op = copy_op(&begin_op, op, INDEX_op_ld_i32);
428     } else {
429         begin_op = QTAILQ_NEXT(begin_op, link);
430         tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
431     }
432 
433     /* call */
434     op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);
435 
436     return op;
437 }
438 
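/*
 * This must mirror, op for op, the sequence emitted by gen_empty_inline_cb();
 * only the immediates (element size, base pointer and addend) change here.
 */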
439 static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
440                                TCGOp *begin_op, TCGOp *op,
441                                int *unused)
442 {
443     char *ptr = cb->userp;
444     size_t elem_size = 0;
445     size_t offset = 0;
446     if (!ptr) {
447         /* use inline entry */
448         ptr = cb->inline_insn.entry.score->data->data;
449         elem_size = g_array_get_element_size(cb->inline_insn.entry.score->data);
450         offset = cb->inline_insn.entry.offset;
451     }
452 
453     op = copy_ld_i32(&begin_op, op);
454     op = copy_mul_i32(&begin_op, op, elem_size);
455     op = copy_ext_i32_ptr(&begin_op, op);
456     op = copy_const_ptr(&begin_op, op, ptr + offset);
457     op = copy_add_ptr(&begin_op, op);
458     op = copy_ld_i64(&begin_op, op);
459     op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);
460     op = copy_st_i64(&begin_op, op);
461     return op;
462 }
463 
464 static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
465                             TCGOp *begin_op, TCGOp *op, int *cb_idx)
466 {
467     enum plugin_gen_cb type = begin_op->args[1];
468 
469     tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);
470 
471     /* const_i32 == mov_i32 ("info", so it remains as is) */
472     op = copy_op(&begin_op, op, INDEX_op_mov_i32);
473 
474     /* const_ptr */
475     op = copy_const_ptr(&begin_op, op, cb->userp);
476 
477     /* copy the ld_i32, but note that we only have to copy it once */
478     if (*cb_idx == -1) {
479         op = copy_op(&begin_op, op, INDEX_op_ld_i32);
480     } else {
481         begin_op = QTAILQ_NEXT(begin_op, link);
482         tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
483     }
484 
485     if (type == PLUGIN_GEN_CB_MEM) {
486         /* call */
487         op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);
488     }
489 
490     return op;
491 }
492 
493 typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
494                             TCGOp *begin_op, TCGOp *op, int *intp);
495 typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);
496 
497 static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
498 {
499     return true;
500 }
501 
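/*
 * Only inject a memory callback if it is interested in this kind of access.
 * args[2] of plugin_cb_start is 1 for a write and 0 otherwise (see the
 * plugin_cb_start comment above), so w + 1 is QEMU_PLUGIN_MEM_W or
 * QEMU_PLUGIN_MEM_R respectively, which is then tested against cb->rw.
 */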
502 static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
503 {
504     int w;
505 
506     w = op->args[2];
507     return !!(cb->rw & (w + 1));
508 }
509 
510 static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
511                            inject_fn inject, op_ok_fn ok)
512 {
513     TCGOp *end_op;
514     TCGOp *op;
515     int cb_idx = -1;
516     int i;
517 
518     if (!cbs || cbs->len == 0) {
519         rm_ops(begin_op);
520         return;
521     }
522 
523     end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
524     tcg_debug_assert(end_op);
525 
526     op = end_op;
527     for (i = 0; i < cbs->len; i++) {
528         struct qemu_plugin_dyn_cb *cb =
529             &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
530 
531         if (!ok(begin_op, cb)) {
532             continue;
533         }
534         op = inject(cb, begin_op, op, &cb_idx);
535     }
536     rm_ops_range(begin_op, end_op);
537 }
538 
539 static void
540 inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
541 {
542     inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
543 }
544 
545 static void
546 inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
547 {
548     inject_cb_type(cbs, begin_op, append_inline_cb, ok);
549 }
550 
551 static void
552 inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
553 {
554     inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
555 }
556 
557 /* we could change the ops in place, but we can reuse more code by copying */
558 static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
559 {
560     TCGOp *orig_op = begin_op;
561     TCGOp *end_op;
562     TCGOp *op;
563 
564     end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
565     tcg_debug_assert(end_op);
566 
567     /* const ptr */
568     op = copy_const_ptr(&begin_op, end_op, arr);
569 
570     /* st_ptr */
571     op = copy_st_ptr(&begin_op, op);
572 
573     rm_ops_range(orig_op, end_op);
574 }
575 
576 /*
577  * Tracking memory accesses performed from helpers requires extra work.
578  * If an instruction is emulated with helpers, we do two things:
579  * (1) copy the CB descriptors, and keep track of them so that they can be
580  * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
581  * that we can read them at run-time (i.e. when the helper executes).
582  * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
583  *
584  * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
585  * is possible that the code we generate after the instruction is
586  * dead, we also add checks before generating tb_exit etc.
587  */
588 static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
589                                      struct qemu_plugin_insn *plugin_insn,
590                                      TCGOp *begin_op)
591 {
592     GArray *cbs[2];
593     GArray *arr;
594     size_t n_cbs, i;
595 
596     cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
597     cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
598 
599     n_cbs = 0;
600     for (i = 0; i < ARRAY_SIZE(cbs); i++) {
601         n_cbs += cbs[i]->len;
602     }
603 
604     plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
605     if (likely(!plugin_insn->mem_helper)) {
606         rm_ops(begin_op);
607         return;
608     }
609     ptb->mem_helper = true;
610 
611     arr = g_array_sized_new(false, false,
612                             sizeof(struct qemu_plugin_dyn_cb), n_cbs);
613 
614     for (i = 0; i < ARRAY_SIZE(cbs); i++) {
615         g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
616     }
617 
618     qemu_plugin_add_dyn_cb_arr(arr);
619     inject_mem_helper(begin_op, arr);
620 }
621 
622 static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
623                                       TCGOp *begin_op)
624 {
625     if (likely(!plugin_insn->mem_helper)) {
626         rm_ops(begin_op);
627         return;
628     }
629     inject_mem_helper(begin_op, NULL);
630 }
631 
632 /* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
633 void plugin_gen_disable_mem_helpers(void)
634 {
635     /*
636      * We could emit the clearing unconditionally and be done. However, this can
637      * be wasteful if, for instance, plugins don't track memory accesses, or if
638      * most TBs don't use helpers. Instead, emit the clearing iff the TB calls
639      * helpers that might access guest memory.
640      *
641      * Note: we do not reset plugin_tb->mem_helper here; a TB might have several
642      * exit points, and we want to emit the clearing from all of them.
643      */
644     if (!tcg_ctx->plugin_tb->mem_helper) {
645         return;
646     }
647     tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env,
648                    offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
649 }
650 
651 static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
652                                 TCGOp *begin_op)
653 {
654     inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
655 }
656 
657 static void plugin_gen_tb_udata_r(const struct qemu_plugin_tb *ptb,
658                                   TCGOp *begin_op)
659 {
660     inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR_R], begin_op);
661 }
662 
663 static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
664                                  TCGOp *begin_op)
665 {
666     inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
667 }
668 
669 static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
670                                   TCGOp *begin_op, int insn_idx)
671 {
672     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
673 
674     inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
675 }
676 
677 static void plugin_gen_insn_udata_r(const struct qemu_plugin_tb *ptb,
678                                     TCGOp *begin_op, int insn_idx)
679 {
680     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
681 
682     inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR_R], begin_op);
683 }
684 
685 static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
686                                    TCGOp *begin_op, int insn_idx)
687 {
688     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
689     inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
690                      begin_op, op_ok);
691 }
692 
693 static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
694                                    TCGOp *begin_op, int insn_idx)
695 {
696     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
697     inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
698 }
699 
700 static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
701                                   TCGOp *begin_op, int insn_idx)
702 {
703     const GArray *cbs;
704     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
705 
706     cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
707     inject_inline_cb(cbs, begin_op, op_rw);
708 }
709 
710 static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
711                                          TCGOp *begin_op, int insn_idx)
712 {
713     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
714     inject_mem_enable_helper(ptb, insn, begin_op);
715 }
716 
717 static void plugin_gen_disable_mem_helper(struct qemu_plugin_tb *ptb,
718                                           TCGOp *begin_op, int insn_idx)
719 {
720     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
721     inject_mem_disable_helper(insn, begin_op);
722 }
723 
724 /* #define DEBUG_PLUGIN_GEN_OPS */
725 static void pr_ops(void)
726 {
727 #ifdef DEBUG_PLUGIN_GEN_OPS
728     TCGOp *op;
729     int i = 0;
730 
731     QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
732         const char *name = "";
733         const char *type = "";
734 
735         if (op->opc == INDEX_op_plugin_cb_start) {
736             switch (op->args[0]) {
737             case PLUGIN_GEN_FROM_TB:
738                 name = "tb";
739                 break;
740             case PLUGIN_GEN_FROM_INSN:
741                 name = "insn";
742                 break;
743             case PLUGIN_GEN_FROM_MEM:
744                 name = "mem";
745                 break;
746             case PLUGIN_GEN_AFTER_INSN:
747                 name = "after insn";
748                 break;
749             default:
750                 break;
751             }
752             switch (op->args[1]) {
753             case PLUGIN_GEN_CB_UDATA:
754                 type = "udata";
755                 break;
756             case PLUGIN_GEN_CB_INLINE:
757                 type = "inline";
758                 break;
759             case PLUGIN_GEN_CB_MEM:
760                 type = "mem";
761                 break;
762             case PLUGIN_GEN_ENABLE_MEM_HELPER:
763                 type = "enable mem helper";
764                 break;
765             case PLUGIN_GEN_DISABLE_MEM_HELPER:
766                 type = "disable mem helper";
767                 break;
768             default:
769                 break;
770             }
771         }
772         printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
773         i++;
774     }
775 #endif
776 }
777 
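/*
 * Single pass over the generated ops: insn_start ops tell us which
 * instruction we are in, and each plugin_cb_start region is replaced with
 * the callbacks collected from plugins, or removed if there are none.
 */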
778 static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
779 {
780     TCGOp *op;
781     int insn_idx = -1;
782 
783     pr_ops();
784 
785     QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
786         switch (op->opc) {
787         case INDEX_op_insn_start:
788             insn_idx++;
789             break;
790         case INDEX_op_plugin_cb_start:
791         {
792             enum plugin_gen_from from = op->args[0];
793             enum plugin_gen_cb type = op->args[1];
794 
795             switch (from) {
796             case PLUGIN_GEN_FROM_TB:
797             {
798                 g_assert(insn_idx == -1);
799 
800                 switch (type) {
801                 case PLUGIN_GEN_CB_UDATA:
802                     plugin_gen_tb_udata(plugin_tb, op);
803                     break;
804                 case PLUGIN_GEN_CB_UDATA_R:
805                     plugin_gen_tb_udata_r(plugin_tb, op);
806                     break;
807                 case PLUGIN_GEN_CB_INLINE:
808                     plugin_gen_tb_inline(plugin_tb, op);
809                     break;
810                 default:
811                     g_assert_not_reached();
812                 }
813                 break;
814             }
815             case PLUGIN_GEN_FROM_INSN:
816             {
817                 g_assert(insn_idx >= 0);
818 
819                 switch (type) {
820                 case PLUGIN_GEN_CB_UDATA:
821                     plugin_gen_insn_udata(plugin_tb, op, insn_idx);
822                     break;
823                 case PLUGIN_GEN_CB_UDATA_R:
824                     plugin_gen_insn_udata_r(plugin_tb, op, insn_idx);
825                     break;
826                 case PLUGIN_GEN_CB_INLINE:
827                     plugin_gen_insn_inline(plugin_tb, op, insn_idx);
828                     break;
829                 case PLUGIN_GEN_ENABLE_MEM_HELPER:
830                     plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
831                     break;
832                 default:
833                     g_assert_not_reached();
834                 }
835                 break;
836             }
837             case PLUGIN_GEN_FROM_MEM:
838             {
839                 g_assert(insn_idx >= 0);
840 
841                 switch (type) {
842                 case PLUGIN_GEN_CB_MEM:
843                     plugin_gen_mem_regular(plugin_tb, op, insn_idx);
844                     break;
845                 case PLUGIN_GEN_CB_INLINE:
846                     plugin_gen_mem_inline(plugin_tb, op, insn_idx);
847                     break;
848                 default:
849                     g_assert_not_reached();
850                 }
851 
852                 break;
853             }
854             case PLUGIN_GEN_AFTER_INSN:
855             {
856                 g_assert(insn_idx >= 0);
857 
858                 switch (type) {
859                 case PLUGIN_GEN_DISABLE_MEM_HELPER:
860                     plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
861                     break;
862                 default:
863                     g_assert_not_reached();
864                 }
865                 break;
866             }
867             default:
868                 g_assert_not_reached();
869             }
870             break;
871         }
872         default:
873             /* plugins don't care about any other ops */
874             break;
875         }
876     }
877     pr_ops();
878 }
879 
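/*
 * Called when translation of a TB starts.  Returns true if at least one
 * plugin has subscribed to the TB-translation event; in that case the
 * per-TB descriptor is reset and the TB-level empty callbacks are emitted.
 */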
880 bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
881                          bool mem_only)
882 {
883     bool ret = false;
884 
885     if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
886         struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
887         int i;
888 
889         /* reset callbacks */
890         for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
891             if (ptb->cbs[i]) {
892                 g_array_set_size(ptb->cbs[i], 0);
893             }
894         }
895         ptb->n = 0;
896 
897         ret = true;
898 
899         ptb->vaddr = db->pc_first;
900         ptb->vaddr2 = -1;
901         ptb->haddr1 = db->host_addr[0];
902         ptb->haddr2 = NULL;
903         ptb->mem_only = mem_only;
904         ptb->mem_helper = false;
905 
906         plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
907     }
908 
909     tcg_ctx->plugin_insn = NULL;
910 
911     return ret;
912 }
913 
914 void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
915 {
916     struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
917     struct qemu_plugin_insn *pinsn;
918 
919     pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
920     tcg_ctx->plugin_insn = pinsn;
921     plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);
922 
923     /*
924      * Detect page crossing to get the new host address.
925      * Note that we skip this when haddr1 == NULL, e.g. when we're
926      * fetching instructions from a region not backed by RAM.
927      */
928     if (ptb->haddr1 == NULL) {
929         pinsn->haddr = NULL;
930     } else if (is_same_page(db, db->pc_next)) {
931         pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
932     } else {
933         if (ptb->vaddr2 == -1) {
934             ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
935             get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
936         }
937         pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
938     }
939 }
940 
941 void plugin_gen_insn_end(void)
942 {
943     plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
944 }
945 
946 /*
947  * There are cases where we never get to finalise a translation - for
948  * example a page fault during translation. As a result we shouldn't
949  * do any clean-up here; instead, everything is reset in
950  * plugin_gen_tb_start.
951  */
952 void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
953 {
954     struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
955 
956     /* translator may have removed instructions, update final count */
957     g_assert(num_insns <= ptb->n);
958     ptb->n = num_insns;
959 
960     /* collect instrumentation requests */
961     qemu_plugin_tb_trans_cb(cpu, ptb);
962 
963     /* inject the instrumentation at the appropriate places */
964     plugin_gen_inject(ptb);
965 }
966