/*
 * The per-CPU TranslationBlock jump cache.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef ACCEL_TCG_TB_JMP_CACHE_H
#define ACCEL_TCG_TB_JMP_CACHE_H

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
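
/*
 * The cache is direct-mapped: one entry per hash value, 4096
 * (1 << TB_JMP_CACHE_BITS) entries in total, indexed by a hash of the
 * guest virtual PC (tb_jmp_cache_hash_func() in tb-hash.h).
 */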

/*
 * Accessed in parallel; all accesses to 'tb' must be atomic.
 * For TARGET_TB_PCREL, accesses to 'pc' must be protected by
 * a load_acquire/store_release to 'tb'.
 */
struct CPUJumpCache {
    struct rcu_head rcu;
    struct {
        TranslationBlock *tb;
#if TARGET_TB_PCREL
        target_ulong pc;
#endif
    } array[TB_JMP_CACHE_SIZE];
};
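
/*
 * Entries are invalidated by atomically storing NULL to 'tb'.  Readers
 * always load 'tb' first and treat NULL as a miss, so no ordering on
 * 'pc' is needed when clearing.  A minimal sketch of clearing a whole
 * cache, assuming 'jc' points at the CPU's jump cache:
 *
 *     for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
 *         qatomic_set(&jc->array[i].tb, NULL);
 *     }
 */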

static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
{
#if TARGET_TB_PCREL
    /* Use acquire to ensure current load of pc from jc. */
    return qatomic_load_acquire(&jc->array[hash].tb);
#else
    /* Use rcu_read to ensure current load of pc from *tb. */
    return qatomic_rcu_read(&jc->array[hash].tb);
#endif
}
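
/*
 * The load_acquire above pairs with the store_release in
 * tb_jmp_cache_set() below: a reader that observes the new 'tb' is
 * guaranteed to also observe the matching 'pc'.
 */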

static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
#if TARGET_TB_PCREL
    return jc->array[hash].pc;
#else
    return tb_pc(tb);
#endif
}
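
/*
 * In the !TARGET_TB_PCREL case the pc comes from the TB itself via
 * tb_pc(), and is immutable once the TB has been translated; this is
 * why the plain qatomic_rcu_read() in tb_jmp_cache_get_tb() suffices.
 */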

static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
                 TranslationBlock *tb, target_ulong pc)
{
#if TARGET_TB_PCREL
    jc->array[hash].pc = pc;
    /* Use store_release on tb to ensure pc is written first. */
    qatomic_store_release(&jc->array[hash].tb, tb);
#else
    /* Use the pc value already stored in tb->pc. */
    qatomic_set(&jc->array[hash].tb, tb);
#endif
}
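
/*
 * Typical lookup fast path, as a simplified sketch modeled on
 * tb_lookup() in cpu-exec.c (the real code also checks cs_base, flags
 * and cflags; tb_jmp_cache_hash_func() is from tb-hash.h and
 * tb_htable_lookup() is the slow-path hash table lookup):
 *
 *     uint32_t hash = tb_jmp_cache_hash_func(pc);
 *     CPUJumpCache *jc = cpu->tb_jmp_cache;
 *     TranslationBlock *tb = tb_jmp_cache_get_tb(jc, hash);
 *
 *     if (likely(tb != NULL && tb_jmp_cache_get_pc(jc, hash, tb) == pc)) {
 *         return tb;
 *     }
 *     tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
 *     if (tb != NULL) {
 *         tb_jmp_cache_set(jc, hash, tb, pc);
 *     }
 *     return tb;
 */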

#endif /* ACCEL_TCG_TB_JMP_CACHE_H */