xref: /qemu/plugins/core.c (revision 7c0dfcf9)
/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the
 * translated code.
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"

#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"

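/*
 * A per-plugin, per-event callback record.  Each plugin context owns at
 * most one record per event type; records for a given event are linked
 * into the RCU-protected list kept in the global plugin state below.
 */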
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;

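/*
 * Look up a plugin context by id.  The caller must hold plugin.lock;
 * an unknown id is a fatal error.
 */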
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

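/*
 * Runs in the context of the target vCPU (either directly or via
 * async_run_on_cpu()): copy the global event mask into the vCPU's
 * plugin_mask and flush its jump cache so subsequent TB lookups pick up
 * code that matches the new instrumentation state.
 */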
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}

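/*
 * GHashTable iterator: propagate the current event mask to one vCPU.
 * Realized CPUs are updated asynchronously on their own thread; the rest
 * can be updated in place.
 */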
static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (DEVICE(cpu)->realized) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

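/*
 * Drop a plugin's callback for one event.  If that leaves the event's
 * global list empty, clear the event bit and push the updated mask out to
 * every vCPU so the event is no longer instrumented.
 */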
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

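/*
 * Common registration path for the per-event callbacks: a NULL func
 * unregisters, otherwise the plugin's existing record for the event is
 * updated or a new one is allocated and linked in.  The first subscriber
 * to an event also sets the event bit and pushes the mask to all vCPUs.
 */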
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

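/*
 * vCPU creation hook: give the new vCPU the current event mask, register
 * it in cpu_ht, then run any VCPU_INIT callbacks.
 */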
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

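/*
 * Record an inline operation: rather than calling back into the plugin,
 * the operation (currently only an add to a plugin-owned 64-bit value) is
 * applied directly when the instrumented event fires.
 */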
void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}

void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}

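/*
 * Record a regular memory callback, remembering which kinds of access
 * (read, write or both) it should fire for.
 */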
void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

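/*
 * For reference, a minimal sketch of how a plugin reaches the registration
 * paths above from its own qemu_plugin_install() entry point.  The handler
 * names (my_idle_cb, my_exit_cb) are purely illustrative:
 *
 *     static void my_idle_cb(qemu_plugin_id_t id, unsigned int vcpu_index)
 *     {
 *         // called whenever a vCPU goes idle
 *     }
 *
 *     static void my_exit_cb(qemu_plugin_id_t id, void *userdata)
 *     {
 *         // called once at exit
 *     }
 *
 *     QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
 *                                                const qemu_info_t *info,
 *                                                int argc, char **argv)
 *     {
 *         qemu_plugin_register_vcpu_idle_cb(id, my_idle_cb);
 *         qemu_plugin_register_atexit_cb(id, my_exit_cb, NULL);
 *         return 0;
 *     }
 */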
static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

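/*
 * Called when the TB cache is flushed: drop all interned dynamic-callback
 * arrays and notify plugins that registered a FLUSH callback that the
 * translations have been discarded.
 */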
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

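/*
 * Apply an inline operation to its plugin-owned value.  Only ADD_U64 is
 * implemented; anything else is a programming error.
 */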
void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

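/*
 * Dispatch the dynamic callbacks attached to the current instruction for a
 * memory access: regular callbacks call back into the plugin, inline ones
 * are applied directly.
 */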
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        if (!(rw & cb->rw)) {
            break;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
                           vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */

void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}

/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */

void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}

void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}

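/* qht comparison: dynamic-callback arrays are compared by pointer identity */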
static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

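/*
 * Constructor: initialise the global plugin state before main() runs.
 * Note the ATEXIT callbacks are wired through the normal atexit()
 * mechanism here; linux-user exits go through qemu_plugin_user_exit()
 * instead.
 */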
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}