/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu-common.h"
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
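
/* A concrete example of the counter's evolution (a sketch derived from
 * synchronize_rcu below): on hosts with 64-bit longs the counter is
 * incremented by RCU_GP_CTR each grace period and takes the values
 * 1, 3, 5, ...; on hosts with 32-bit longs only bit 1 is toggled, so
 * the counter alternates between 1 and 3 and cannot overflow.
 */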

QemuEvent rcu_gp_event;
static QemuMutex rcu_gp_lock;

/*
 * Check whether a quiescent state was crossed between the counter update
 * in synchronize_rcu and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    /* The reader is quiescent if it is outside any read-side critical
     * section (v == 0), or if it entered one after the current grace
     * period began (v == rcu_gp_ctr).
     */
    v = atomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}

/* Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
__thread struct rcu_reader_data rcu_reader;

/* Protected by rcu_gp_lock.  */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

/* Wait for previous parity/grace period to be empty of readers.  */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using atomic_mb_set for index->waiting, and
         * atomic_mb_read for index->ctr, memory barriers are placed
         * manually: the writes to different threads' flags are
         * independent, so a single pair of barriers covers the whole
         * loop.  atomic_mb_set has a smp_wmb before...
         */
        smp_wmb();
        QLIST_FOREACH(index, &registry, node) {
            atomic_set(&index->waiting, true);
        }

        /* ... and a smp_mb after.  */
        smp_mb();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for mb_set here; at worst we get some
                 * extra futex wakeups.
                 */
                atomic_set(&index->waiting, false);
            }
        }

        /* atomic_mb_read has smp_rmb after.  */
        smp_rmb();

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and
         * try again.
         */
        qemu_event_wait(&rcu_gp_event);
    }

    /* Put the reader list back into the registry.  */
    QLIST_SWAP(&registry, &qsreaders, node);
}
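
/* How a reader reports a quiescent state (a sketch; the read side lives
 * in include/qemu/rcu.h): rcu_read_unlock() clears rcu_reader.ctr and,
 * if it finds rcu_reader.waiting set, clears it and calls
 * qemu_event_set(&rcu_gp_event), waking up the qemu_event_wait above.
 */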

void synchronize_rcu(void)
{
    qemu_mutex_lock(&rcu_gp_lock);

    if (!QLIST_EMPTY(&registry)) {
        /* In either branch below, the atomic_mb_set blocks stores that
         * free old RCU-protected pointers.
         */
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphase algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period.  */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }

    qemu_mutex_unlock(&rcu_gp_lock);
}
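
/* For reference, a minimal reader that pairs with the writer side above
 * (a sketch using the read-side API from include/qemu/rcu.h; foo_ptr and
 * use() are hypothetical):
 *
 *     struct Foo *p;
 *
 *     rcu_read_lock();
 *     p = atomic_rcu_read(&foo_ptr);
 *     if (p) {
 *         use(p);
 *     }
 *     rcu_read_unlock();
 *
 * rcu_read_lock() snapshots rcu_gp_ctr into rcu_reader.ctr, which is the
 * value that rcu_gp_ongoing compares against.
 */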


#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = atomic_xchg(&tail, &node->next);
    atomic_mb_set(old_tail, node);
}
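
/* A note on the enqueue/dequeue race (a sketch of the invariant, not part
 * of the original liburcu comments): between the atomic_xchg above and the
 * atomic_mb_set into *old_tail, the predecessor's next pointer is still
 * NULL.  try_dequeue below treats a NULL next pointer as "an enqueue is in
 * progress" and returns NULL instead of spinning.
 */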

static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.
     * The tail is consistent because updating it is the first step
     * of enqueuing.  It is only the next pointers that might be
     * inconsistent.
     */
    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    node = head;
    next = atomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry.  */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    for (;;) {
        int tries = 0;
        int n = atomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we must only process elements that
         * were added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = atomic_read(&rcu_call_count);
                if (n == 0) {
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = atomic_read(&rcu_call_count);
        }

        atomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    atomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
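
/* Typical usage of call_rcu1 (a sketch; struct Foo and foo_free are
 * illustrative): embed the rcu_head in the RCU-protected structure and
 * free the structure from the callback, which runs only after a grace
 * period has elapsed:
 *
 *     struct Foo {
 *         struct rcu_head rcu;
 *         int datum;
 *     };
 *
 *     static void foo_free(struct rcu_head *rcu)
 *     {
 *         struct Foo *foo = container_of(rcu, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     call_rcu1(&foo->rcu, foo_free);
 */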

void rcu_register_thread(void)
{
    assert(rcu_reader.ctr == 0);
    qemu_mutex_lock(&rcu_gp_lock);
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
    qemu_mutex_unlock(&rcu_gp_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_gp_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_gp_lock);
}
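
/* Example thread lifetime (a sketch; worker and its body are illustrative):
 * a thread must stay registered for as long as it performs RCU reads, and
 * must not be inside a read-side critical section when it registers or
 * unregisters:
 *
 *     static void *worker(void *opaque)
 *     {
 *         rcu_register_thread();
 *         ...
 *         rcu_read_lock();
 *         ... read RCU-protected data ...
 *         rcu_read_unlock();
 *         ...
 *         rcu_unregister_thread();
 *         return NULL;
 *     }
 */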

static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_gp_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to hold the iothread lock, so the call_rcu
     * thread must have been quiescent at fork time; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    qemu_mutex_lock(&rcu_gp_lock);
}

static void rcu_init_unlock(void)
{
    qemu_mutex_unlock(&rcu_gp_lock);
}
#endif

void rcu_after_fork(void)
{
    /* Only the thread that called fork() survives in the child; empty
     * the registry, then re-register this thread and recreate the
     * call_rcu thread via rcu_init_complete.
     */
    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}

static void __attribute__((__constructor__)) rcu_init(void)
{
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_unlock);
#endif
    rcu_init_complete();
}
329