xref: /qemu/accel/tcg/plugin-gen.c (revision d0fb9657)
1 /*
2  * plugin-gen.c - TCG-related bits of plugin infrastructure
3  *
4  * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
5  * License: GNU GPL, version 2 or later.
6  *   See the COPYING file in the top-level directory.
7  *
8  * We support instrumentation at an instruction granularity. That is,
9  * if a plugin wants to instrument the memory accesses performed by a
10  * particular instruction, it can just do that instead of instrumenting
11  * all memory accesses. To make that possible, we first have to
12  * translate a TB, so that plugins can decide what/where to instrument.
13  *
14  * Injecting the desired instrumentation could be done with a second
15  * translation pass that combined the instrumentation requests, but that
16  * would be ugly and inefficient since we would decode the guest code twice.
17  * Instead, during TB translation we add "empty" instrumentation calls for all
18  * possible instrumentation events, and then once we collect the instrumentation
19  * requests from plugins, we either "fill in" those empty events or remove them
20  * if they have no requests.
21  *
22  * When "filling in" an event we first copy the empty callback's TCG ops. This
23  * might seem unnecessary, but it is done to support an arbitrary number
24  * of callbacks per event. Take for example a regular instruction callback.
25  * We first generate a callback to an empty helper function. Then, if two
26  * plugins register one callback each for this instruction, we make two copies
27  * of the TCG ops generated for the empty callback, substituting the function
28  * pointer that points to the empty helper function with the plugins' desired
29  * callback functions. After that we remove the empty callback's ops.
30  *
31  * Note that the location in TCGOp.args[] of the pointer to a helper function
32  * varies across different guest and host architectures. Instead of duplicating
33  * the logic that figures this out, we rely on the fact that the empty
34  * callbacks point to empty functions that are unique pointers in the program.
35  * Thus, to find the right location we just have to look for a match in
36  * TCGOp.args[]. This is the main reason why we first copy an empty callback's
37  * TCG ops and then fill them in; regardless of whether we have one or many
38  * callbacks for that event, the logic to add all of them is the same.
39  *
40  * When generating more than one callback per event, we make a small
41  * optimization to avoid generating redundant operations. For instance, for the
42  * second and all subsequent callbacks of an event, we do not need to reload the
43  * CPU's index into a TCG temp, since the first callback did it already.
44  */
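
/*
 * For reference, the requests that get "filled in" below come from the
 * public plugin API (see include/qemu/qemu-plugin.h for the authoritative
 * prototypes). A typical translation-time callback looks roughly like this
 * (sketch only; insn_exec_cb and udata are plugin-defined):
 *
 *     static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *     {
 *         size_t i;
 *
 *         for (i = 0; i < qemu_plugin_tb_n_insns(tb); i++) {
 *             struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *
 *             qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec_cb,
 *                                                    QEMU_PLUGIN_CB_NO_REGS,
 *                                                    udata);
 *         }
 *     }
 */
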
45 #include "qemu/osdep.h"
46 #include "tcg/tcg.h"
47 #include "tcg/tcg-op.h"
48 #include "trace/mem.h"
49 #include "exec/exec-all.h"
50 #include "exec/plugin-gen.h"
51 #include "exec/translator.h"
52 
53 #ifdef CONFIG_SOFTMMU
54 # define CONFIG_SOFTMMU_GATE 1
55 #else
56 # define CONFIG_SOFTMMU_GATE 0
57 #endif
58 
59 /*
60  * plugin_cb_start TCG op args[]:
61  * 0: enum plugin_gen_from
62  * 1: enum plugin_gen_cb
63  * 2: set to 1 for a mem callback that is a write, 0 otherwise.
64  */
65 
66 enum plugin_gen_from {
67     PLUGIN_GEN_FROM_TB,
68     PLUGIN_GEN_FROM_INSN,
69     PLUGIN_GEN_FROM_MEM,
70     PLUGIN_GEN_AFTER_INSN,
71     PLUGIN_GEN_N_FROMS,
72 };
73 
74 enum plugin_gen_cb {
75     PLUGIN_GEN_CB_UDATA,
76     PLUGIN_GEN_CB_INLINE,
77     PLUGIN_GEN_CB_MEM,
78     PLUGIN_GEN_ENABLE_MEM_HELPER,
79     PLUGIN_GEN_DISABLE_MEM_HELPER,
80     PLUGIN_GEN_N_CBS,
81 };
82 
83 /*
84  * These helpers are stubs that get dynamically switched out for direct
85  * calls to the plugin once a plugin has subscribed to them.
86  */
87 void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
88 { }
89 
90 void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
91                                 qemu_plugin_meminfo_t info, uint64_t vaddr,
92                                 void *userdata)
93 { }
94 
95 static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
96 {
97     TCGv_i32 cpu_index = tcg_temp_new_i32();
98     TCGv_i32 meminfo = tcg_const_i32(info);
99     TCGv_i64 vaddr64 = tcg_temp_new_i64();
100     TCGv_ptr udata = tcg_const_ptr(NULL);
101 
102     tcg_gen_ld_i32(cpu_index, cpu_env,
103                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
104     tcg_gen_extu_tl_i64(vaddr64, vaddr);
105 
106     gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);
107 
108     tcg_temp_free_ptr(udata);
109     tcg_temp_free_i64(vaddr64);
110     tcg_temp_free_i32(meminfo);
111     tcg_temp_free_i32(cpu_index);
112 }
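
/*
 * Note: the op sequence emitted above (roughly: mov of "info", mov of the
 * NULL udata pointer, ld_i32 of cpu_index, extension of the address to i64,
 * call) is the template that append_mem_cb() walks later when filling in
 * the plugin's real callback and userdata.
 */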
113 
114 static void gen_empty_udata_cb(void)
115 {
116     TCGv_i32 cpu_index = tcg_temp_new_i32();
117     TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */
118 
119     tcg_gen_ld_i32(cpu_index, cpu_env,
120                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
121     gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
122 
123     tcg_temp_free_ptr(udata);
124     tcg_temp_free_i32(cpu_index);
125 }
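
/*
 * Roughly speaking (the exact opcodes depend on the host), the empty udata
 * callback above consists of: a mov of the NULL udata pointer, an ld_i32 of
 * cpu_index, and the helper call. append_udata_cb() walks exactly this
 * sequence when substituting the plugin's callback and userdata.
 */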
126 
127 /*
128  * For now we only support addi_i64.
129  * When we support more ops, we can generate one empty inline cb for each.
130  */
131 static void gen_empty_inline_cb(void)
132 {
133     TCGv_i64 val = tcg_temp_new_i64();
134     TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */
135 
136     tcg_gen_ld_i64(val, ptr, 0);
137     /* pass an immediate != 0 so that it doesn't get optimized away */
138     tcg_gen_addi_i64(val, val, 0xdeadface);
139     tcg_gen_st_i64(val, ptr, 0);
140     tcg_temp_free_ptr(ptr);
141     tcg_temp_free_i64(val);
142 }
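
/*
 * An inline callback like the one above typically corresponds to a plugin
 * request along the lines of (sketch; my_counter is a plugin-defined u64):
 *
 *     qemu_plugin_register_vcpu_insn_exec_inline(insn,
 *                                                QEMU_PLUGIN_INLINE_ADD_U64,
 *                                                &my_counter, 1);
 *
 * append_inline_cb() then replaces the NULL pointer with &my_counter and
 * the 0xdeadface immediate with 1.
 */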
143 
144 static void gen_empty_mem_cb(TCGv addr, uint32_t info)
145 {
146     do_gen_mem_cb(addr, info);
147 }
148 
149 /*
150  * Share the same function for enable/disable. When enabling, the NULL
151  * pointer will be overwritten later.
152  */
153 static void gen_empty_mem_helper(void)
154 {
155     TCGv_ptr ptr;
156 
157     ptr = tcg_const_ptr(NULL);
158     tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
159                                  offsetof(ArchCPU, env));
160     tcg_temp_free_ptr(ptr);
161 }
162 
163 static inline
164 void gen_plugin_cb_start(enum plugin_gen_from from,
165                          enum plugin_gen_cb type, unsigned wr)
166 {
167     TCGOp *op;
168 
169     tcg_gen_plugin_cb_start(from, type, wr);
170     op = tcg_last_op();
171     QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link);
172 }
173 
174 static void gen_wrapped(enum plugin_gen_from from,
175                         enum plugin_gen_cb type, void (*func)(void))
176 {
177     gen_plugin_cb_start(from, type, 0);
178     func();
179     tcg_gen_plugin_cb_end();
180 }
181 
182 static inline void plugin_gen_empty_callback(enum plugin_gen_from from)
183 {
184     switch (from) {
185     case PLUGIN_GEN_AFTER_INSN:
186         gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
187                     gen_empty_mem_helper);
188         break;
189     case PLUGIN_GEN_FROM_INSN:
190         /*
191          * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
192          * the first callback of an instruction.
193          */
194         gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
195                     gen_empty_mem_helper);
196         /* fall through */
197     case PLUGIN_GEN_FROM_TB:
198         gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
199         gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
200         break;
201     default:
202         g_assert_not_reached();
203     }
204 }
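
/*
 * Per TB we thus emit the FROM_TB/CB_UDATA and FROM_TB/CB_INLINE markers
 * once, and bracket each translated instruction roughly as follows
 * (FROM_MEM markers are emitted separately, per memory access):
 *
 *     FROM_INSN/ENABLE_MEM_HELPER
 *     FROM_INSN/CB_UDATA
 *     FROM_INSN/CB_INLINE
 *     <ops for the guest instruction>
 *     AFTER_INSN/DISABLE_MEM_HELPER
 *
 * plugin_gen_inject() relies on this layout to attribute each marker to
 * the right instruction.
 */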
205 
206 union mem_gen_fn {
207     void (*mem_fn)(TCGv, uint32_t);
208     void (*inline_fn)(void);
209 };
210 
211 static void gen_mem_wrapped(enum plugin_gen_cb type,
212                             const union mem_gen_fn *f, TCGv addr,
213                             uint32_t info, bool is_mem)
214 {
215     int wr = !!(info & TRACE_MEM_ST);
216 
217     gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr);
218     if (is_mem) {
219         f->mem_fn(addr, info);
220     } else {
221         f->inline_fn();
222     }
223     tcg_gen_plugin_cb_end();
224 }
225 
226 void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
227 {
228     union mem_gen_fn fn;
229 
230     fn.mem_fn = gen_empty_mem_cb;
231     gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);
232 
233     fn.inline_fn = gen_empty_inline_cb;
234     gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
235 }
236 
237 static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
238 {
239     while (op) {
240         if (op->opc == opc) {
241             return op;
242         }
243         op = QTAILQ_NEXT(op, link);
244     }
245     return NULL;
246 }
247 
248 static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
249 {
250     TCGOp *ret = QTAILQ_NEXT(end, link);
251 
252     QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
253     return ret;
254 }
255 
256 /* remove all ops until (and including) plugin_cb_end */
257 static TCGOp *rm_ops(TCGOp *op)
258 {
259     TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);
260 
261     tcg_debug_assert(end_op);
262     return rm_ops_range(op, end_op);
263 }
264 
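/*
 * Note on the copy_* helpers below: @begin_op is a cursor into the ops of
 * the empty callback being used as a template (it is advanced as template
 * ops are consumed), while @op is the tail of the copy being built; each
 * helper returns the last op it inserted so that callers can keep appending.
 */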
265 static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
266 {
267     *begin_op = QTAILQ_NEXT(*begin_op, link);
268     tcg_debug_assert(*begin_op);
269     op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc);
270     memcpy(op->args, (*begin_op)->args, sizeof(op->args));
271     return op;
272 }
273 
274 static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
275 {
276     op = copy_op_nocheck(begin_op, op);
277     tcg_debug_assert((*begin_op)->opc == opc);
278     return op;
279 }
280 
281 static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
282 {
283     if (TCG_TARGET_REG_BITS == 32) {
284         /* mov_i32 */
285         op = copy_op(begin_op, op, INDEX_op_mov_i32);
286         /* mov_i32 w/ $0 */
287         op = copy_op(begin_op, op, INDEX_op_mov_i32);
288     } else {
289         /* extu_i32_i64 */
290         op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
291     }
292     return op;
293 }
294 
295 static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
296 {
297     if (TCG_TARGET_REG_BITS == 32) {
298         /* 2x mov_i32 */
299         op = copy_op(begin_op, op, INDEX_op_mov_i32);
300         op = copy_op(begin_op, op, INDEX_op_mov_i32);
301     } else {
302         /* mov_i64 */
303         op = copy_op(begin_op, op, INDEX_op_mov_i64);
304     }
305     return op;
306 }
307 
308 static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
309 {
310     if (UINTPTR_MAX == UINT32_MAX) {
311         /* mov_i32 */
312         op = copy_op(begin_op, op, INDEX_op_mov_i32);
313         op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
314     } else {
315         /* mov_i64 */
316         op = copy_op(begin_op, op, INDEX_op_mov_i64);
317         op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
318     }
319     return op;
320 }
321 
322 static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
323 {
324     if (TARGET_LONG_BITS == 32) {
325         /* extu_i32_i64 */
326         op = copy_extu_i32_i64(begin_op, op);
327     } else {
328         /* mov_i64 */
329         op = copy_mov_i64(begin_op, op);
330     }
331     return op;
332 }
333 
334 static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
335 {
336     if (TCG_TARGET_REG_BITS == 32) {
337         /* 2x ld_i32 */
338         op = copy_op(begin_op, op, INDEX_op_ld_i32);
339         op = copy_op(begin_op, op, INDEX_op_ld_i32);
340     } else {
341         /* ld_i64 */
342         op = copy_op(begin_op, op, INDEX_op_ld_i64);
343     }
344     return op;
345 }
346 
347 static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
348 {
349     if (TCG_TARGET_REG_BITS == 32) {
350         /* 2x st_i32 */
351         op = copy_op(begin_op, op, INDEX_op_st_i32);
352         op = copy_op(begin_op, op, INDEX_op_st_i32);
353     } else {
354         /* st_i64 */
355         op = copy_op(begin_op, op, INDEX_op_st_i64);
356     }
357     return op;
358 }
359 
360 static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
361 {
362     if (TCG_TARGET_REG_BITS == 32) {
363         /* all 32-bit backends must implement add2_i32 */
364         g_assert(TCG_TARGET_HAS_add2_i32);
365         op = copy_op(begin_op, op, INDEX_op_add2_i32);
366         op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
367         op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
368     } else {
369         op = copy_op(begin_op, op, INDEX_op_add_i64);
370         op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
371     }
372     return op;
373 }
374 
375 static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
376 {
377     if (UINTPTR_MAX == UINT32_MAX) {
378         /* st_i32 */
379         op = copy_op(begin_op, op, INDEX_op_st_i32);
380     } else {
381         /* st_i64 */
382         op = copy_st_i64(begin_op, op);
383     }
384     return op;
385 }
386 
387 static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
388                         void *func, unsigned tcg_flags, int *cb_idx)
389 {
390     /* copy all ops until the call */
391     do {
392         op = copy_op_nocheck(begin_op, op);
393     } while (op->opc != INDEX_op_call);
394 
395     /* fill in the op call */
396     op->param1 = (*begin_op)->param1;
397     op->param2 = (*begin_op)->param2;
398     tcg_debug_assert(op->life == 0);
399     if (*cb_idx == -1) {
400         int i;
401 
402         /*
403          * Instead of working out the position of the callback in args[], just
404          * look for @empty_func, since it should be a unique pointer.
405          */
406         for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) {
407             if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) {
408                 *cb_idx = i;
409                 break;
410             }
411         }
412         tcg_debug_assert(i < MAX_OPC_PARAM_ARGS);
413     }
414     op->args[*cb_idx] = (uintptr_t)func;
415     op->args[*cb_idx + 1] = tcg_flags;
416 
417     return op;
418 }
419 
420 /*
421  * When we append/replace ops here we are sensitive to the exact pattern of
422  * TCGOps generated by the tcg_gen_FOO calls that emitted the empty
423  * callbacks. A mismatch asserts very quickly in a debug build, since we
424  * check that each op we replace is the one we expect.
425  */
426 static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
427                               TCGOp *begin_op, TCGOp *op, int *cb_idx)
428 {
429     /* const_ptr */
430     op = copy_const_ptr(&begin_op, op, cb->userp);
431 
432     /* copy the ld_i32, but note that we only have to copy it once */
433     begin_op = QTAILQ_NEXT(begin_op, link);
434     tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
435     if (*cb_idx == -1) {
436         op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
437         memcpy(op->args, begin_op->args, sizeof(op->args));
438     }
439 
440     /* call */
441     op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
442                    cb->f.vcpu_udata, cb->tcg_flags, cb_idx);
443 
444     return op;
445 }
446 
447 static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
448                                TCGOp *begin_op, TCGOp *op,
449                                int *unused)
450 {
451     /* const_ptr */
452     op = copy_const_ptr(&begin_op, op, cb->userp);
453 
454     /* ld_i64 */
455     op = copy_ld_i64(&begin_op, op);
456 
457     /* add_i64 */
458     op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);
459 
460     /* st_i64 */
461     op = copy_st_i64(&begin_op, op);
462 
463     return op;
464 }
465 
466 static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
467                             TCGOp *begin_op, TCGOp *op, int *cb_idx)
468 {
469     enum plugin_gen_cb type = begin_op->args[1];
470 
471     tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);
472 
473     /* const_i32 == mov_i32 ("info", so it remains as is) */
474     op = copy_op(&begin_op, op, INDEX_op_mov_i32);
475 
476     /* const_ptr */
477     op = copy_const_ptr(&begin_op, op, cb->userp);
478 
479     /* copy the ld_i32, but note that we only have to copy it once */
480     begin_op = QTAILQ_NEXT(begin_op, link);
481     tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
482     if (*cb_idx == -1) {
483         op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
484         memcpy(op->args, begin_op->args, sizeof(op->args));
485     }
486 
487     /* extu_tl_i64 */
488     op = copy_extu_tl_i64(&begin_op, op);
489 
490     if (type == PLUGIN_GEN_CB_MEM) {
491         /* call */
492         op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
493                        cb->f.vcpu_udata, cb->tcg_flags, cb_idx);
494     }
495 
496     return op;
497 }
498 
499 typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
500                             TCGOp *begin_op, TCGOp *op, int *intp);
501 typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);
502 
503 static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
504 {
505     return true;
506 }
507 
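/*
 * For FROM_MEM markers, args[2] was set to 1 for a write and 0 for a read
 * (see gen_mem_wrapped()), while cb->rw is the QEMU_PLUGIN_MEM_{R,W,RW}
 * bitmask with R == 1 and W == 2; mapping w to w + 1 selects the right bit.
 */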
508 static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
509 {
510     int w;
511 
512     w = op->args[2];
513     return !!(cb->rw & (w + 1));
514 }
515 
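/*
 * Inject copies of the template that starts at @begin_op, one per callback
 * in @cbs that passes @ok, then remove the template ops. If there are no
 * callbacks at all, just remove the template.
 */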
516 static inline
517 void inject_cb_type(const GArray *cbs, TCGOp *begin_op, inject_fn inject,
518                     op_ok_fn ok)
519 {
520     TCGOp *end_op;
521     TCGOp *op;
522     int cb_idx = -1;
523     int i;
524 
525     if (!cbs || cbs->len == 0) {
526         rm_ops(begin_op);
527         return;
528     }
529 
530     end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
531     tcg_debug_assert(end_op);
532 
533     op = end_op;
534     for (i = 0; i < cbs->len; i++) {
535         struct qemu_plugin_dyn_cb *cb =
536             &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
537 
538         if (!ok(begin_op, cb)) {
539             continue;
540         }
541         op = inject(cb, begin_op, op, &cb_idx);
542     }
543     rm_ops_range(begin_op, end_op);
544 }
545 
546 static void
547 inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
548 {
549     inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
550 }
551 
552 static void
553 inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
554 {
555     inject_cb_type(cbs, begin_op, append_inline_cb, ok);
556 }
557 
558 static void
559 inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
560 {
561     inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
562 }
563 
564 /* we could change the ops in place, but we can reuse more code by copying */
565 static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
566 {
567     TCGOp *orig_op = begin_op;
568     TCGOp *end_op;
569     TCGOp *op;
570 
571     end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
572     tcg_debug_assert(end_op);
573 
574     /* const ptr */
575     op = copy_const_ptr(&begin_op, end_op, arr);
576 
577     /* st_ptr */
578     op = copy_st_ptr(&begin_op, op);
579 
580     rm_ops_range(orig_op, end_op);
581 }
582 
583 /*
584  * Tracking memory accesses performed from helpers requires extra work.
585  * If an instruction is emulated with helpers, we do two things:
586  * (1) copy the CB descriptors, and keep track of them so that they can be
587  * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
588  * that we can read them at run-time (i.e. when the helper executes).
589  * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
590  *
591  * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
592  * is possible that the code we generate after the instruction is
593  * dead, we also add checks before generating tb_exit etc.
594  */
595 static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn,
596                                      TCGOp *begin_op)
597 {
598     GArray *cbs[2];
599     GArray *arr;
600     size_t n_cbs, i;
601 
602     cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
603     cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
604 
605     n_cbs = 0;
606     for (i = 0; i < ARRAY_SIZE(cbs); i++) {
607         n_cbs += cbs[i]->len;
608     }
609 
610     plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
611     if (likely(!plugin_insn->mem_helper)) {
612         rm_ops(begin_op);
613         return;
614     }
615 
616     arr = g_array_sized_new(false, false,
617                             sizeof(struct qemu_plugin_dyn_cb), n_cbs);
618 
619     for (i = 0; i < ARRAY_SIZE(cbs); i++) {
620         g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
621     }
622 
623     qemu_plugin_add_dyn_cb_arr(arr);
624     inject_mem_helper(begin_op, arr);
625 }
626 
627 static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
628                                       TCGOp *begin_op)
629 {
630     if (likely(!plugin_insn->mem_helper)) {
631         rm_ops(begin_op);
632         return;
633     }
634     inject_mem_helper(begin_op, NULL);
635 }
636 
637 /* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
638 void plugin_gen_disable_mem_helpers(void)
639 {
640     TCGv_ptr ptr;
641 
642     if (likely(tcg_ctx->plugin_insn == NULL ||
643                !tcg_ctx->plugin_insn->mem_helper)) {
644         return;
645     }
646     ptr = tcg_const_ptr(NULL);
647     tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
648                                  offsetof(ArchCPU, env));
649     tcg_temp_free_ptr(ptr);
650     tcg_ctx->plugin_insn->mem_helper = false;
651 }
652 
653 static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
654                                 TCGOp *begin_op)
655 {
656     inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
657 }
658 
659 static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
660                                  TCGOp *begin_op)
661 {
662     inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
663 }
664 
665 static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
666                                   TCGOp *begin_op, int insn_idx)
667 {
668     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
669 
670     inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
671 }
672 
673 static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
674                                    TCGOp *begin_op, int insn_idx)
675 {
676     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
677     inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
678                      begin_op, op_ok);
679 }
680 
681 static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
682                                    TCGOp *begin_op, int insn_idx)
683 {
684     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
685     inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
686 }
687 
688 static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
689                                   TCGOp *begin_op, int insn_idx)
690 {
691     const GArray *cbs;
692     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
693 
694     cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
695     inject_inline_cb(cbs, begin_op, op_rw);
696 }
697 
698 static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb,
699                                          TCGOp *begin_op, int insn_idx)
700 {
701     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
702     inject_mem_enable_helper(insn, begin_op);
703 }
704 
705 static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb,
706                                           TCGOp *begin_op, int insn_idx)
707 {
708     struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
709     inject_mem_disable_helper(insn, begin_op);
710 }
711 
712 static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op,
713                              int insn_idx)
714 {
715     enum plugin_gen_from from = begin_op->args[0];
716     enum plugin_gen_cb type = begin_op->args[1];
717 
718     switch (from) {
719     case PLUGIN_GEN_FROM_TB:
720         switch (type) {
721         case PLUGIN_GEN_CB_UDATA:
722             plugin_gen_tb_udata(ptb, begin_op);
723             return;
724         case PLUGIN_GEN_CB_INLINE:
725             plugin_gen_tb_inline(ptb, begin_op);
726             return;
727         default:
728             g_assert_not_reached();
729         }
730     case PLUGIN_GEN_FROM_INSN:
731         switch (type) {
732         case PLUGIN_GEN_CB_UDATA:
733             plugin_gen_insn_udata(ptb, begin_op, insn_idx);
734             return;
735         case PLUGIN_GEN_CB_INLINE:
736             plugin_gen_insn_inline(ptb, begin_op, insn_idx);
737             return;
738         case PLUGIN_GEN_ENABLE_MEM_HELPER:
739             plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx);
740             return;
741         default:
742             g_assert_not_reached();
743         }
744     case PLUGIN_GEN_FROM_MEM:
745         switch (type) {
746         case PLUGIN_GEN_CB_MEM:
747             plugin_gen_mem_regular(ptb, begin_op, insn_idx);
748             return;
749         case PLUGIN_GEN_CB_INLINE:
750             plugin_gen_mem_inline(ptb, begin_op, insn_idx);
751             return;
752         default:
753             g_assert_not_reached();
754         }
755     case PLUGIN_GEN_AFTER_INSN:
756         switch (type) {
757         case PLUGIN_GEN_DISABLE_MEM_HELPER:
758             plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx);
759             return;
760         default:
761             g_assert_not_reached();
762         }
763     default:
764         g_assert_not_reached();
765     }
766 }
767 
768 /* #define DEBUG_PLUGIN_GEN_OPS */
769 static void pr_ops(void)
770 {
771 #ifdef DEBUG_PLUGIN_GEN_OPS
772     TCGOp *op;
773     int i = 0;
774 
775     QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
776         const char *name = "";
777         const char *type = "";
778 
779         if (op->opc == INDEX_op_plugin_cb_start) {
780             switch (op->args[0]) {
781             case PLUGIN_GEN_FROM_TB:
782                 name = "tb";
783                 break;
784             case PLUGIN_GEN_FROM_INSN:
785                 name = "insn";
786                 break;
787             case PLUGIN_GEN_FROM_MEM:
788                 name = "mem";
789                 break;
790             case PLUGIN_GEN_AFTER_INSN:
791                 name = "after insn";
792                 break;
793             default:
794                 break;
795             }
796             switch (op->args[1]) {
797             case PLUGIN_GEN_CB_UDATA:
798                 type = "udata";
799                 break;
800             case PLUGIN_GEN_CB_INLINE:
801                 type = "inline";
802                 break;
803             case PLUGIN_GEN_CB_MEM:
804                 type = "mem";
805                 break;
806             case PLUGIN_GEN_ENABLE_MEM_HELPER:
807                 type = "enable mem helper";
808                 break;
809             case PLUGIN_GEN_DISABLE_MEM_HELPER:
810                 type = "disable mem helper";
811                 break;
812             default:
813                 break;
814             }
815         }
816         printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
817         i++;
818     }
819 #endif
820 }
821 
822 static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
823 {
824     TCGOp *op;
825     int insn_idx;
826 
827     pr_ops();
828     insn_idx = -1;
829     QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) {
830         enum plugin_gen_from from = op->args[0];
831         enum plugin_gen_cb type = op->args[1];
832 
833         tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start);
834         /* ENABLE_MEM_HELPER is the first callback of an instruction */
835         if (from == PLUGIN_GEN_FROM_INSN &&
836             type == PLUGIN_GEN_ENABLE_MEM_HELPER) {
837             insn_idx++;
838         }
839         plugin_inject_cb(plugin_tb, op, insn_idx);
840     }
841     pr_ops();
842 }
843 
844 bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only)
845 {
846     struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
847     bool ret = false;
848 
849     if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
850         ret = true;
851 
852         QSIMPLEQ_INIT(&tcg_ctx->plugin_ops);
853         ptb->vaddr = tb->pc;
854         ptb->vaddr2 = -1;
855         get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
856         ptb->haddr2 = NULL;
857         ptb->mem_only = mem_only;
858 
859         plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
860     }
861     return ret;
862 }
863 
864 void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
865 {
866     struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
867     struct qemu_plugin_insn *pinsn;
868 
869     pinsn = qemu_plugin_tb_insn_get(ptb);
870     tcg_ctx->plugin_insn = pinsn;
871     pinsn->vaddr = db->pc_next;
872     plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);
873 
874     /*
875      * Detect page crossing to get the new host address.
876      * Note that we skip this when haddr1 == NULL, e.g. when we're
877      * fetching instructions from a region not backed by RAM.
878      */
879     if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) &&
880         unlikely((db->pc_next & TARGET_PAGE_MASK) !=
881                  (db->pc_first & TARGET_PAGE_MASK))) {
882         get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
883                                  &ptb->haddr2);
884         ptb->vaddr2 = db->pc_next;
885     }
886     if (likely(ptb->vaddr2 == -1)) {
887         pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
888     } else {
889         pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
890     }
891 }
892 
893 void plugin_gen_insn_end(void)
894 {
895     plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
896 }
897 
898 void plugin_gen_tb_end(CPUState *cpu)
899 {
900     struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
901     int i;
902 
903     /* collect instrumentation requests */
904     qemu_plugin_tb_trans_cb(cpu, ptb);
905 
906     /* inject the instrumentation at the appropriate places */
907     plugin_gen_inject(ptb);
908 
909     /* clean up */
910     for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
911         if (ptb->cbs[i]) {
912             g_array_set_size(ptb->cbs[i], 0);
913         }
914     }
915     ptb->n = 0;
916     tcg_ctx->plugin_insn = NULL;
917 }
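
/*
 * These entry points are driven by the generic translator loop (see
 * accel/tcg/translator.c): plugin_gen_tb_start() before the first
 * instruction, plugin_gen_insn_start()/plugin_gen_insn_end() around each
 * translated instruction, and plugin_gen_tb_end() once the TB has been
 * translated, at which point the collected requests are injected.
 */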
918