/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef QEMU_STATS64_H
#define QEMU_STATS64_H

#include "qemu/atomic.h"

/* This provides atomic operations on a 64-bit type, using a
 * reader-writer spinlock on architectures that lack 64-bit atomic
 * accesses.  Even on those architectures, it tries hard not to take
 * the lock.
 */
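
/*
 * Typical usage (an illustrative sketch; "rx_bytes" is a hypothetical
 * counter, not part of this header):
 *
 *     static Stat64 rx_bytes;
 *
 *     stat64_init(&rx_bytes, 0);        before other threads can see it
 *     stat64_add(&rx_bytes, len);       hot path, callable from any thread
 *     total = stat64_get(&rx_bytes);    concurrent 64-bit snapshot
 */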

typedef struct Stat64 {
#ifdef CONFIG_ATOMIC64
    aligned_uint64_t value;
#else
    uint32_t low, high;   /* least/most significant 32 bits of the value */
    uint32_t lock;        /* spinlock taken by the out-of-line slow paths */
#endif
} Stat64;

#ifdef CONFIG_ATOMIC64
static inline void stat64_init(Stat64 *s, uint64_t value)
{
    /* This is not guaranteed to be atomic! */
    *s = (Stat64) { value };
}

static inline uint64_t stat64_get(const Stat64 *s)
{
    return qatomic_read__nocheck(&s->value);
}

static inline void stat64_set(Stat64 *s, uint64_t value)
{
    qatomic_set__nocheck(&s->value, value);
}

static inline void stat64_add(Stat64 *s, uint64_t value)
{
    qatomic_add(&s->value, value);
}

static inline void stat64_min(Stat64 *s, uint64_t value)
{
    uint64_t orig = qatomic_read__nocheck(&s->value);
    while (orig > value) {
        /* cmpxchg returns the value that was in memory; keep retrying
         * until the stored value is no greater than ours.
         */
        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
    }
}

static inline void stat64_max(Stat64 *s, uint64_t value)
{
    uint64_t orig = qatomic_read__nocheck(&s->value);
    while (orig < value) {
        /* Likewise: retry until the stored value is no less than ours. */
        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
    }
}
#else
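/* Out-of-line fallbacks, implemented with the spinlock (see
 * util/stats64.c).  Judging from the call sites below, the
 * bool-returning functions return false when the update could not be
 * completed (e.g. the lock was not available) and the caller must
 * retry, and true once the value has been updated under the lock.
 */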
uint64_t stat64_get(const Stat64 *s);
void stat64_set(Stat64 *s, uint64_t value);
bool stat64_min_slow(Stat64 *s, uint64_t value);
bool stat64_max_slow(Stat64 *s, uint64_t value);
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high);

static inline void stat64_init(Stat64 *s, uint64_t value)
{
    /* This is not guaranteed to be atomic! */
    *s = (Stat64) { .low = value, .high = value >> 32, .lock = 0 };
}

static inline void stat64_add(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    high = value >> 32;
    low = (uint32_t) value;
    if (!low) {
        if (high) {
            qatomic_add(&s->high, high);
        }
        return;
    }

    for (;;) {
        uint32_t orig = s->low;
        uint32_t result = orig + low;
        uint32_t old;

        if (result < low || high) {
            /* The 32-bit sum wrapped around (e.g. orig == 0xffffffff,
             * low == 1) or the high part is affected directly; either
             * way s->high must change, so take the lock.
             */
            if (stat64_add32_carry(s, low, high)) {
                return;
            }
            continue;
        }

        /* No carry, try with a 32-bit cmpxchg.  The result is independent of
         * the high 32 bits, so it can race just fine with stat64_add32_carry
         * and even stat64_get!
         */
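        /* For instance, if s->low == 5 and s->high == 2, adding 3 only
         * moves the low word from 5 to 8; a concurrent stat64_get sees
         * either 0x200000005 or 0x200000008, both values the counter
         * actually held at some point.
         */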
        old = qatomic_cmpxchg(&s->low, orig, result);
        if (orig == old) {
            return;
        }
    }
}

static inline void stat64_min(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    uint32_t orig_low, orig_high;

    high = value >> 32;
    low = (uint32_t) value;
    do {
        orig_high = qatomic_read(&s->high);
        if (orig_high < high) {
            return;
        }

        if (orig_high == high) {
            /* High 32 bits are equal.  Read low after high, otherwise we
             * can get a false positive (e.g. 0x1235,0x0000 changes to
             * 0x1234,0x8000 and we read it as 0x1234,0x0000). Pairs with
             * the write barrier in stat64_min_slow.
             */
            smp_rmb();
            orig_low = qatomic_read(&s->low);
            if (orig_low <= low) {
                return;
            }

            /* See if we were lucky and a writer raced against us.  The
             * barrier is theoretically unnecessary, but if we remove it
             * we may miss being lucky.
             */
            smp_rmb();
            orig_high = qatomic_read(&s->high);
            if (orig_high < high) {
                return;
            }
        }

        /* If the value changes in any way, we have to take the lock.  */
    } while (!stat64_min_slow(s, value));
}

static inline void stat64_max(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    uint32_t orig_low, orig_high;

    high = value >> 32;
    low = (uint32_t) value;
    do {
        orig_high = qatomic_read(&s->high);
        if (orig_high > high) {
            return;
        }

        if (orig_high == high) {
            /* High 32 bits are equal.  Read low after high, otherwise we
             * can get a false positive (e.g. 0x1234,0x8000 changes to
             * 0x1235,0x0000 and we read it as 0x1235,0x8000). Pairs with
             * the write barrier in stat64_max_slow.
             */
            smp_rmb();
            orig_low = qatomic_read(&s->low);
            if (orig_low >= low) {
                return;
            }

            /* See if we were lucky and a writer raced against us.  The
             * barrier is theoretically unnecessary, but if we remove it
             * we may miss being lucky.
             */
            smp_rmb();
            orig_high = qatomic_read(&s->high);
            if (orig_high > high) {
                return;
            }
        }

        /* If the value changes in any way, we have to take the lock.  */
    } while (!stat64_max_slow(s, value));
}

#endif /* CONFIG_ATOMIC64 */

#endif /* QEMU_STATS64_H */