xref: /qemu/iothread.c (revision 0955d66e)
/*
 * Event loop thread
 *
 * Copyright Red Hat Inc., 2013, 2020
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/module.h"
#include "block/aio.h"
#include "block/block.h"
#include "sysemu/iothread.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
#include "qemu/rcu.h"
#include "qemu/main-loop.h"

typedef ObjectClass IOThreadClass;

DECLARE_CLASS_CHECKERS(IOThreadClass, IOTHREAD,
                       TYPE_IOTHREAD)

#ifdef CONFIG_POSIX
/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
 * 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32
 * workloads.
 */
#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL
#else
#define IOTHREAD_POLL_MAX_NS_DEFAULT 0ULL
#endif

static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();
    /*
     * g_main_context_push_thread_default() must be called before anything
     * in this new thread uses glib.
     */
    g_main_context_push_thread_default(iothread->worker_context);
    qemu_set_current_aio_context(iothread->ctx);
    iothread->thread_id = qemu_get_thread_id();
    qemu_sem_post(&iothread->init_done_sem);

    while (iothread->running) {
        /*
         * Note: functionally, the g_main_loop_run() below could already
         * cover the aio_poll() events, but we can't run the main loop
         * unconditionally because an explicit aio_poll() here is faster
         * than g_main_loop_run() when we do not need the gcontext at all
         * (e.g., pure block layer iothreads).  In other words, when we
         * want to run the gcontext with the iothread we pay some
         * performance for the extra functionality.
         */
        aio_poll(iothread->ctx, true);

        /*
         * We must check the running state again in case it was
         * changed by the previous aio_poll().
         */
        if (iothread->running && qatomic_read(&iothread->run_gcontext)) {
            g_main_loop_run(iothread->main_loop);
        }
    }

    g_main_context_pop_thread_default(iothread->worker_context);
    rcu_unregister_thread();
    return NULL;
}

/* Runs in iothread_run() thread */
static void iothread_stop_bh(void *opaque)
{
    IOThread *iothread = opaque;

    iothread->running = false; /* stop iothread_run() */

    if (iothread->main_loop) {
        g_main_loop_quit(iothread->main_loop);
    }
}

void iothread_stop(IOThread *iothread)
{
    if (!iothread->ctx || iothread->stopping) {
        return;
    }
    iothread->stopping = true;
    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
    qemu_thread_join(&iothread->thread);
}

static void iothread_instance_init(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
    iothread->thread_id = -1;
    qemu_sem_init(&iothread->init_done_sem, 0);
    /* By default, we don't run gcontext */
    qatomic_set(&iothread->run_gcontext, 0);
}

static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(iothread);

    /*
     * Before glib2 2.33.10, there is a glib2 bug where a GSource's
     * context pointer may not be cleared even though the context has
     * already been destroyed (when it should be).  Free the AIO
     * context first here to work around that glib bug.
     *
     * We can remove this comment once the minimum supported glib2
     * version is raised to 2.33.10.  Until then, free the GSources
     * before destroying any GMainContext.
     */
    if (iothread->ctx) {
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
    }
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
        g_main_loop_unref(iothread->main_loop);
        iothread->main_loop = NULL;
    }
    qemu_sem_destroy(&iothread->init_done_sem);
}

static void iothread_init_gcontext(IOThread *iothread)
{
    GSource *source;

    iothread->worker_context = g_main_context_new();
    source = aio_get_g_source(iothread_get_aio_context(iothread));
    g_source_attach(source, iothread->worker_context);
    g_source_unref(source);
    iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}

static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
{
    ERRP_GUARD();

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                errp);
    if (*errp) {
        return;
    }

    aio_context_set_aio_params(iothread->ctx,
                               iothread->aio_max_batch,
                               errp);
}

static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *thread_name;

    iothread->stopping = false;
    iothread->running = true;
    iothread->ctx = aio_context_new(errp);
    if (!iothread->ctx) {
        return;
    }

    /*
     * Init one GMainContext for the iothread unconditionally, even if
     * it's not used
     */
    iothread_init_gcontext(iothread);

    iothread_set_aio_context_params(iothread, &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    thread_name = g_strdup_printf("IO %s",
                        object_get_canonical_path_component(OBJECT(obj)));
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);

    /* Wait for initialization to complete */
    while (iothread->thread_id == -1) {
        qemu_sem_wait(&iothread->init_done_sem);
    }
}
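
/*
 * A usage sketch (not from this file): TYPE_IOTHREAD is user-creatable,
 * so an instance can be created at runtime via QMP object-add, which
 * ends up calling iothread_complete() above once all properties are set:
 *
 *   { "execute": "object-add",
 *     "arguments": { "qom-type": "iothread", "id": "iothread0" } }
 */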

typedef struct {
    const char *name;
    ptrdiff_t offset; /* field's byte offset in IOThread struct */
} IOThreadParamInfo;

static IOThreadParamInfo poll_max_ns_info = {
    "poll-max-ns", offsetof(IOThread, poll_max_ns),
};
static IOThreadParamInfo poll_grow_info = {
    "poll-grow", offsetof(IOThread, poll_grow),
};
static IOThreadParamInfo poll_shrink_info = {
    "poll-shrink", offsetof(IOThread, poll_shrink),
};
static IOThreadParamInfo aio_max_batch_info = {
    "aio-max-batch", offsetof(IOThread, aio_max_batch),
};

static void iothread_get_param(Object *obj, Visitor *v,
        const char *name, IOThreadParamInfo *info, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    int64_t *field = (void *)iothread + info->offset;

    visit_type_int64(v, name, field, errp);
}

static bool iothread_set_param(Object *obj, Visitor *v,
        const char *name, IOThreadParamInfo *info, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    int64_t *field = (void *)iothread + info->offset;
    int64_t value;

    if (!visit_type_int64(v, name, &value, errp)) {
        return false;
    }

    if (value < 0) {
        error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
                   info->name, INT64_MAX);
        return false;
    }

    *field = value;

    return true;
}

static void iothread_get_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThreadParamInfo *info = opaque;

    iothread_get_param(obj, v, name, info, errp);
}

static void iothread_set_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    IOThreadParamInfo *info = opaque;

    if (!iothread_set_param(obj, v, name, info, errp)) {
        return;
    }

    if (iothread->ctx) {
        aio_context_set_poll_params(iothread->ctx,
                                    iothread->poll_max_ns,
                                    iothread->poll_grow,
                                    iothread->poll_shrink,
                                    errp);
    }
}

static void iothread_get_aio_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThreadParamInfo *info = opaque;

    iothread_get_param(obj, v, name, info, errp);
}

static void iothread_set_aio_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    IOThreadParamInfo *info = opaque;

    if (!iothread_set_param(obj, v, name, info, errp)) {
        return;
    }

    if (iothread->ctx) {
        aio_context_set_aio_params(iothread->ctx,
                                   iothread->aio_max_batch,
                                   errp);
    }
}

static void iothread_class_init(ObjectClass *klass, void *class_data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
    ucc->complete = iothread_complete;

    object_class_property_add(klass, "poll-max-ns", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_max_ns_info);
    object_class_property_add(klass, "poll-grow", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_grow_info);
    object_class_property_add(klass, "poll-shrink", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_shrink_info);
    object_class_property_add(klass, "aio-max-batch", "int",
                              iothread_get_aio_param,
                              iothread_set_aio_param,
                              NULL, &aio_max_batch_info);
}
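
/*
 * A sketch of how the properties registered above are typically configured
 * from the command line (values here are purely illustrative):
 *
 *   -object iothread,id=iothread0,poll-max-ns=32768,aio-max-batch=0
 *
 * Setting poll-max-ns to 0 disables busy polling for that iothread.  The
 * setters also take effect at runtime (e.g. via qom-set) because
 * iothread_set_poll_param()/iothread_set_aio_param() re-apply the values
 * to the live AioContext when iothread->ctx exists.
 */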

static const TypeInfo iothread_info = {
    .name = TYPE_IOTHREAD,
    .parent = TYPE_OBJECT,
    .class_init = iothread_class_init,
    .instance_size = sizeof(IOThread),
    .instance_init = iothread_instance_init,
    .instance_finalize = iothread_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        {TYPE_USER_CREATABLE},
        {}
    },
};

static void iothread_register_types(void)
{
    type_register_static(&iothread_info);
}

type_init(iothread_register_types)

char *iothread_get_id(IOThread *iothread)
{
    return g_strdup(object_get_canonical_path_component(OBJECT(iothread)));
}

AioContext *iothread_get_aio_context(IOThread *iothread)
{
    return iothread->ctx;
}

static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***tail = opaque;
    IOThreadInfo *info;
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;
    }

    info = g_new0(IOThreadInfo, 1);
    info->id = iothread_get_id(iothread);
    info->thread_id = iothread->thread_id;
    info->poll_max_ns = iothread->poll_max_ns;
    info->poll_grow = iothread->poll_grow;
    info->poll_shrink = iothread->poll_shrink;
    info->aio_max_batch = iothread->aio_max_batch;

    QAPI_LIST_APPEND(*tail, info);
    return 0;
}

IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **prev = &head;
    Object *container = object_get_objects_root();

    object_child_foreach(container, query_one_iothread, &prev);
    return head;
}
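
/*
 * A sketch of the QMP command implemented above and the shape of its reply
 * (thread-id and the tuning values are illustrative):
 *
 *   -> { "execute": "query-iothreads" }
 *   <- { "return": [ { "id": "iothread0", "thread-id": 12345,
 *                      "poll-max-ns": 32768, "poll-grow": 0,
 *                      "poll-shrink": 0, "aio-max-batch": 0 } ] }
 */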

GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
    qatomic_set(&iothread->run_gcontext, 1);
    aio_notify(iothread->ctx);
    return iothread->worker_context;
}
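
/*
 * A minimal usage sketch: a caller that needs glib dispatch in this iothread
 * attaches its GSource to the context returned here; this also sets
 * run_gcontext so iothread_run() starts servicing the GMainLoop:
 *
 *   g_source_attach(my_source, iothread_get_g_main_context(iothread));
 *
 * where my_source is a caller-provided GSource.
 */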

IOThread *iothread_create(const char *id, Error **errp)
{
    Object *obj;

    obj = object_new_with_props(TYPE_IOTHREAD,
                                object_get_internal_root(),
                                id, errp, NULL);

    return IOTHREAD(obj);
}
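
/*
 * A sketch of internal (non-user-visible) iothread usage, with a
 * hypothetical callback example_bh_cb:
 *
 *   IOThread *t = iothread_create("my-subsystem-iothread", &error_abort);
 *
 *   aio_bh_schedule_oneshot(iothread_get_aio_context(t),
 *                           example_bh_cb, NULL);
 *   ...
 *   iothread_destroy(t);
 */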

void iothread_destroy(IOThread *iothread)
{
    object_unparent(OBJECT(iothread));
}

/* Look up an IOThread by its id.  Only finds user-created objects, not
 * internal iothread_create() objects. */
IOThread *iothread_by_id(const char *id)
{
    return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
}

bool qemu_in_iothread(void)
{
    return qemu_get_current_aio_context() != qemu_get_aio_context();
}
437