xref: /qemu/tcg/region.c (revision c7bb41b4)
/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-internal.h"


struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start_aligned;
    void *after_prologue;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */
    size_t total_size; /* size of entire buffer, >= n * stride */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};
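
/*
 * Illustrative layout (all sizes hypothetical), with n = 2 regions and
 * one guard page per region (stride = size + guard):
 *
 *   +----------+---------------+-----+---------------+-----+
 *   | prologue |   region 0    |guard|   region 1    |guard|
 *   +----------+---------------+-----+---------------+-----+
 *   ^          ^
 *   |          after_prologue
 *   start_aligned
 *
 * Region 0 is shortened by the prologue; the last region absorbs any
 * pages left over from rounding the region size down to the page size.
 */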

static struct tcg_region_state region;

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

bool in_code_gen_buffer(const void *p)
{
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
    return (size_t)(p - region.start_aligned) <= region.total_size;
}
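
/*
 * Note that the single unsigned comparison above also rejects pointers
 * below the buffer: e.g. (hypothetical addresses) with start_aligned ==
 * 0x10000 and total_size == 0x1000, p == 0xff00 wraps to a huge unsigned
 * difference, so no separate p >= region.start_aligned check is needed.
 */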

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * In a lookup, at least one of the two .size fields is 0 (the key's).
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
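
/*
 * Example of the lookup convention above (addresses hypothetical):
 * a TB with .ptr = 0x1000 and .size = 0x40 matches any lookup key
 * { .ptr = p, .size = 0 } where 0x1000 <= p < 0x1040, per ptr_cmp_tb_tc.
 */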

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert.  The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
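
/*
 * E.g. (hypothetical sizes): with stride = 1 MiB and n = 8, a pointer
 * at offset 2.5 MiB from start_aligned falls in region 2; any offset
 * beyond stride * 7 is clamped to region 7, which also covers the
 * extra pages assigned to the last region.
 */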

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
{
    TranslationBlock *tb = v;

    tb_destroy(tb);
    return FALSE;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.after_prologue;
    }
    /* The final region may have a few extra pages due to earlier rounding. */
    if (curr_region == region.n - 1) {
        end = region.start_aligned + region.total_size;
    }

    *pstart = start;
    *pend = end;
}
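
/*
 * Worked example (hypothetical values): page size 0x1000, n = 4,
 * stride = 0x101000, size = 0x100000, total_size = 0x404000.
 * Region 2 spans [start_aligned + 0x202000, start_aligned + 0x302000);
 * region 3 spans [start_aligned + 0x303000, start_aligned + 0x404000),
 * picking up the pages left over from rounding.
 */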

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}
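
/*
 * TCG_HIGHWATER bytes of slack are left between code_gen_highwater and
 * the true end of the region, so that overflow can be checked once per
 * operation rather than per byte, on the assumption that no single
 * operation starting below the high-water mark overruns the buffer.
 */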

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    size_t n_regions;

    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size. If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MB.
     * If we can't, then just allocate one region per vCPU thread.
     */
    n_regions = tb_size / (2 * MiB);
    if (n_regions <= max_cpus) {
        return max_cpus;
    }
    return MIN(n_regions, max_cpus * 8);
#endif
}
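
/*
 * E.g. with a hypothetical 1 GiB buffer and max_cpus = 8: 1 GiB / 2 MiB
 * yields 512 candidate regions, which the cap reduces to
 * max_cpus * 8 = 64.  With an 8 MiB buffer instead, 4 <= 8, so each of
 * the 8 vCPU threads simply gets its own region.
 */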

/*
 * Minimum size of the code gen buffer.  This number is arbitrarily chosen,
 * but not so small that we can't have a fair number of TBs live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32-bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

#ifdef __mips__
/*
 * In order to use J and JAL within the code_gen_buffer, we require
 * that the buffer not cross a 256MB boundary.
 */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
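
/*
 * E.g. (hypothetical values): addr = 0x0ff00000 and size = 0x00200000
 * span [0x0ff00000, 0x10100000), which crosses the 256MB boundary at
 * 0x10000000; addr ^ (addr + size) = 0x1fe00000 has bits set above
 * bit 27, so the masked result is non-zero.
 */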

/*
 * We weren't able to allocate a buffer without crossing that boundary,
 * so make do with the larger portion of the buffer that doesn't cross.
 * Returns the new base and size of the buffer in *obuf and *osize.
 */
static inline void split_cross_256mb(void **obuf, size_t *osize,
                                     void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    *obuf = buf1;
    *osize = size1;
}
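
/*
 * Continuing the hypothetical example above: the boundary at 0x10000000
 * splits the buffer into 0x00100000 bytes below and 0x00100000 bytes
 * above; with equal halves, size1 < size2 is false and the lower
 * portion is kept.
 */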
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        split_cross_256mb(&buf, &size, buf, size);
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                             PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
                                      int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid re-acquiring
         * the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            split_cross_256mb(&buf2, &size2, buf, size);
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;
    return prot;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

/* Returns the host page protections of the rw buffer, or -1 on error. */
static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

#ifdef __mips__
    /* Find space for the RX mapping, vs the 256MiB regions. */
    if (alloc_code_gen_buffer_anon(size, PROT_NONE,
                                   MAP_PRIVATE | MAP_ANONYMOUS |
                                   MAP_NORESERVE, errp) < 0) {
        return -1;
    }
    /* The size of the mapping may have been adjusted. */
    buf_rx = region.start_aligned;
    size = region.total_size;
#endif

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

#ifdef __mips__
    void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
                     MAP_SHARED | MAP_FIXED, fd, 0);
    if (tmp != buf_rx) {
        goto fail_rx;
    }
#else
    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }
#endif

    close(fd);
    region.start_aligned = buf_rw;
    region.total_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    return PROT_READ | PROT_WRITE;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}
#endif /* CONFIG_POSIX */

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
        return -1;
    }

    buf_rw = (mach_vm_address_t)region.start_aligned;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return -1;
    }

    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return -1;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return -1;
}

static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        prot = alloc_code_gen_buffer_splitwx(size, errp);
        if (prot >= 0) {
            return prot;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return -1;
        }
        error_free_or_abort(errp);
    }

    /*
     * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
     * rejects a permission change from RWX -> NONE when reserving the
     * guard pages later.  We can go the other way with the same number
     * of syscalls, so always begin with PROT_NONE.
     */
    prot = PROT_NONE;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_DARWIN
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region.  Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    const size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    int have_prot, need_prot;

    /* Size the buffer.  */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }

    have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
    assert(have_prot >= 0);

    /* Request large pages for the buffer and the splitwx.  */
    qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
    if (tcg_splitwx_diff) {
        qemu_madvise(region.start_aligned + tcg_splitwx_diff,
                     region.total_size, QEMU_MADV_HUGEPAGE);
    }

    /*
     * Make region_size a multiple of page_size, using start_aligned as the
     * start.  As a result of this we might end up with a few extra pages at
     * the end of the buffer; we will assign those to the last region.
     */
    region.n = tcg_n_regions(tb_size, max_cpus);
    region_size = tb_size / region.n;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages: one for code, one for guard */
    g_assert(region_size >= 2 * page_size);
    region.stride = region_size;

    /* Reserve space for guard pages. */
    region.size = region_size - page_size;
    region.total_size -= page_size;

    /*
     * The first region will be smaller than the others because it will
     * contain the prologue, which has yet to be generated.  For now, the
     * first region begins at the page boundary.
     */
    region.after_prologue = region.start_aligned;

    /* init the region struct */
    qemu_mutex_init(&region.lock);

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur.  Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     * Work with the page protections set up with the initial mapping.
     */
    need_prot = PAGE_READ | PAGE_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
    if (tcg_splitwx_diff == 0) {
        need_prot |= PAGE_EXEC;
    }
#endif
    for (size_t i = 0, n = region.n; i < n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);
        if (have_prot != need_prot) {
            int rc;

            if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
                rc = qemu_mprotect_rwx(start, end - start);
            } else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
                rc = qemu_mprotect_rw(start, end - start);
            } else {
                g_assert_not_reached();
            }
            if (rc) {
                error_setg_errno(&error_fatal, errno,
                                 "mprotect of jit buffer");
            }
        }
        if (have_prot != 0) {
            /* Guard pages are nice for bug detection but are not essential. */
            (void)qemu_mprotect_none(end, page_size);
        }
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context for CONFIG_USER_ONLY.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}

void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region.  */
    g_assert(region.start_aligned == s->code_gen_buffer);
    region.after_prologue = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
                     region.start_aligned + region.total_size -
                     region.after_prologue);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.total_size;
    capacity -= (region.n - 1) * guard_size;
    capacity -= region.n * TCG_HIGHWATER;

    return capacity;
}
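
/*
 * Worked example (hypothetical values): with total_size = 0x404000,
 * n = 4, a one-page (0x1000) guard per stride, and TCG_HIGHWATER =
 * 0x400, capacity = 0x404000 - 3 * 0x1000 - 4 * 0x400 = 0x400000.
 */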

size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);

        total += qatomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}
1000