xref: /qemu/include/exec/ram_addr.h (revision 603476c2)
/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
struct RAMBlock {
    struct rcu_head rcu;     /* used to free the block via call_rcu() */
    struct MemoryRegion *mr; /* owning memory region */
    uint8_t *host;           /* host virtual address of the mapping */
    ram_addr_t offset;       /* offset within the global ram_addr_t space */
    ram_addr_t used_length;  /* length currently exposed to the guest */
    ram_addr_t max_length;   /* upper bound for resizeable blocks */
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    int fd;                  /* backing file descriptor, or -1 if none */
    size_t page_size;        /* page size of the backing memory */
};
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
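/*
 * Typical use (hypothetical caller, not part of the original header):
 * bounds-check a guest RAM offset before turning it into a host pointer.
 *
 *   rcu_read_lock();
 *   if (offset_in_ramblock(block, offset)) {
 *       void *hva = ramblock_ptr(block, offset);
 *       ...read or write up to used_length - offset bytes at hva...
 *   }
 *   rcu_read_unlock();
 */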
/* The dirty memory bitmap is split into fixed-size blocks to allow growth
 * under RCU.  The bitmap for a block can be accessed as follows:
 *
 *   rcu_read_lock();
 *
 *   DirtyMemoryBlocks *blocks =
 *       atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
 *
 *   ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
 *   unsigned long *block = blocks->blocks[idx];
 *   ...access block bitmap...
 *
 *   rcu_read_unlock();
 *
 * Remember to check for the end of the block when accessing a range of
 * addresses.  Move on to the next block if you reach the end.
 *
 * Organization into blocks allows dirty memory to grow (but not shrink)
 * under RCU.  When adding new RAMBlocks requires the dirty memory to grow,
 * a new DirtyMemoryBlocks array is allocated; pointers to existing blocks
 * are kept the same.  Other threads can safely access existing blocks
 * while dirty memory is being grown.  When no thread is using the old
 * DirtyMemoryBlocks anymore, it is freed by RCU (the underlying blocks
 * stay, because the new DirtyMemoryBlocks still points to them).
 */
#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
typedef struct {
    struct rcu_head rcu;
    unsigned long *blocks[];
} DirtyMemoryBlocks;
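/*
 * Sizing note (not from the original source): DIRTY_MEMORY_BLOCK_SIZE is
 * 256 * 1024 * 8 = 2^21 bits, one bit per target page.  With 4 KiB target
 * pages a single block therefore covers 2^21 * 2^12 bytes = 8 GiB of guest
 * RAM, so even large guests need only a short array of block pointers.
 */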
typedef struct RAMList {
    QemuMutex mutex;
    RAMBlock *mru_block;
    /* RCU-enabled, writes protected by the ramlist lock. */
    QLIST_HEAD(, RAMBlock) blocks;
    DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
    uint32_t version;
} RAMList;
extern RAMList ram_list;
ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);

RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
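/*
 * Worked example (assuming the client indices from exec/memory.h, where
 * DIRTY_MEMORY_VGA/CODE/MIGRATION are 0/1/2 and DIRTY_MEMORY_NUM is 3):
 * DIRTY_CLIENTS_ALL is (1 << 3) - 1 = 0b111, and DIRTY_CLIENTS_NOCODE is
 * 0b111 & ~(1 << 1) = 0b101, i.e. every client except the TCG
 * self-modifying-code tracker.
 */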
/* Returns true if any page in [start, start + length) is dirty for @client. */
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
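/*
 * Usage sketch (hypothetical caller): poll whether a framebuffer range must
 * be redrawn.  "fb_offset" and "fb_size" are made-up names.
 *
 *   if (cpu_physical_memory_get_dirty(fb_offset, fb_size, DIRTY_MEMORY_VGA)) {
 *       ...repaint, then clear the range with
 *          cpu_physical_memory_test_and_clear_dirty()...
 *   }
 */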
/* Returns true only if every page in [start, start + length) is dirty for
 * @client.
 */
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num,
                                                 offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
/* Returns true if the page containing @addr is dirty for @client. */
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

/* Returns true unless the page is dirty for every client, i.e. while at
 * least one client still needs to be notified of writes to it.
 */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

/* Returns the subset of @mask whose clients have at least one clean page
 * in [start, start + length).
 */
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
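/*
 * Usage sketch (hypothetical): before marking a range dirty, ask which
 * clients actually still have clean pages there, and only dirty those.
 *
 *   uint8_t need = cpu_physical_memory_range_includes_clean(start, length,
 *                                                           DIRTY_CLIENTS_ALL);
 *   if (need) {
 *       cpu_physical_memory_set_dirty_range(start, length, need);
 *   }
 */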
/* Marks the page containing @addr dirty for @client. */
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

/* Marks [start, start + length) dirty for every client set in @mask. */
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_modified_memory(start, length);
}
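/*
 * Example (hypothetical): after DMA-ing "len" bytes into guest RAM at
 * ram_addr_t "addr", mark the touched pages dirty for every client except
 * the TCG code tracker, since no translated code ran from them:
 *
 *   cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_NOCODE);
 */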
#if !defined(_WIN32)
/* Applies a little-endian dirty bitmap (e.g. a KVM dirty log slice) to the
 * dirty tracking of all clients, for @pages pages starting at @start.
 */
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Is the start address aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL
                                        : DIRTY_CLIENTS_NOCODE;
        /*
         * Walking the bitmap is faster than walking memory page by page,
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
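/*
 * Usage sketch (hypothetical names): feed a dirty-page bitmap obtained from
 * the hypervisor straight into the global dirty tracking.
 *
 *   cpu_physical_memory_set_dirty_lebitmap(kvm_bitmap, block->offset, npages);
 */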
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

/* Clears [start, start + length) in the dirty bitmaps of all clients. */
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length,
                                             DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
/* Transfers the migration dirty bits for [start, start + length) into @dest,
 * clearing them in the global bitmap, and returns the number of pages that
 * were not already dirty in @dest.
 */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* Is the start address aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
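/*
 * Usage sketch (hypothetical, modelled on the migration code): accumulate
 * freshly dirtied pages into a caller-owned bitmap sized
 * BITS_TO_LONGS(last_ram_offset() >> TARGET_PAGE_BITS) words.
 *
 *   uint64_t new_pages =
 *       cpu_physical_memory_sync_dirty_bitmap(migration_bitmap,
 *                                             block->offset,
 *                                             block->used_length);
 *   migration_dirty_pages += new_pages;
 */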
void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */