/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"

struct RAMBlock {
    struct rcu_head rcu;           /* freed via call_rcu() */
    struct MemoryRegion *mr;       /* region this block backs */
    uint8_t *host;                 /* base of the host mapping */
    ram_addr_t offset;             /* offset within the ram_addr_t space */
    ram_addr_t used_length;        /* currently usable length */
    ram_addr_t max_length;         /* total mapped length, >= used_length */
    /* Notifier called when a resizeable block is resized */
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;                        /* backing file descriptor, or -1 */
    size_t page_size;              /* host page size of the backing storage */
};

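/*
 * Illustrative sketch (not part of this header): readers walk the block
 * list under RCU, with the idiom used elsewhere in QEMU:
 *
 *     RAMBlock *block;
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 *         ... inspect block->idstr, block->used_length, ...
 *     }
 *     rcu_read_unlock();
 */
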
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

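/*
 * Illustrative sketch (hypothetical caller): translate a guest RAM offset
 * inside a block into a host pointer before copying data through it:
 *
 *     if (offset_in_ramblock(block, offset)) {
 *         void *hva = ramblock_ptr(block, offset);
 *         memcpy(hva, buf, len);    // len must also stay in used_length
 *     }
 */
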
ram_addr_t last_ram_offset(void);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

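/*
 * Illustrative sketch (hypothetical caller; my_resized_cb is an assumed
 * name): allocate a block that can grow up to max_size, then resize it:
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_resizeable(16 * 1024 * 1024,
 *                                              64 * 1024 * 1024,
 *                                              my_resized_cb, mr, &err);
 *
 *     qemu_ram_resize(rb, 32 * 1024 * 1024, &err);
 */
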
/* Dirty-client bitmasks: all clients, and all except the TCG code client */
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

/*
 * Return true if any page in [start, start + length) is dirty for
 * the given client.
 */
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

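/*
 * Illustrative sketch (assumed display-style consumer): only repaint a
 * region when a writer has dirtied it for the VGA client:
 *
 *     if (cpu_physical_memory_get_dirty(region_start, region_len,
 *                                       DIRTY_MEMORY_VGA)) {
 *         ... repaint, then clear with
 *         cpu_physical_memory_test_and_clear_dirty() ...
 *     }
 */
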
/*
 * Return true only if every page in [start, start + length) is dirty for
 * the given client.
 */
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num,
                                                 offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

/*
 * Return true unless the page containing @addr is currently dirty for
 * every client; i.e. a page only stops being clean while no client has
 * yet consumed the latest write to it.
 */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

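/*
 * Illustrative sketch: after a guest-visible write, marking the range
 * dirty for all clients makes the page non-clean until some client clears
 * its bit again:
 *
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_ALL);
 *     assert(!cpu_physical_memory_is_clean(addr));
 */
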
/*
 * Return the subset of @mask whose clients still see at least one clean
 * page in [start, start + length); a zero result means the whole range
 * is already dirty for every client requested.
 */
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

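/*
 * Illustrative sketch (assumed caller shape, modeled on the write path in
 * exec.c): narrow a dirty-log mask to the clients that still need work
 * before doing per-page updates:
 *
 *     uint8_t dirty_log_mask = DIRTY_CLIENTS_NOCODE;
 *
 *     dirty_log_mask =
 *         cpu_physical_memory_range_includes_clean(addr, len, dirty_log_mask);
 *     if (dirty_log_mask) {
 *         cpu_physical_memory_set_dirty_range(addr, len, dirty_log_mask);
 *     }
 */
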
/* Mark the single page containing @addr dirty for one client. */
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

/*
 * Mark [start, start + length) dirty for every client set in @mask,
 * and notify Xen of the modified range.
 */
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_modified_memory(start, length);
}

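/*
 * Illustrative sketch: a writer that cannot affect translated code (or
 * that has already invalidated it) dirties only the non-code clients:
 *
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_NOCODE);
 */
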
#if !defined(_WIN32)
/*
 * Mark pages dirty from a little-endian dirty bitmap (e.g. a hypervisor
 * dirty log): each set bit covers one host page, i.e. @hpratio target
 * pages, starting at guest address @start.
 */
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Fast path: the start address is aligned to a bitmap word and the
     * host and target page sizes match, so whole words can be ORed in. */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * Traveling the bitmap is faster than traveling memory page by
         * page, especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

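/*
 * Illustrative sketch (assumed KVM-style consumer): after fetching a
 * little-endian dirty log with KVM_GET_DIRTY_LOG covering @pages host
 * pages of guest RAM starting at ram_addr @start:
 *
 *     cpu_physical_memory_set_dirty_lebitmap(dirty_log_words, start, pages);
 *
 * where dirty_log_words is the unsigned-long array the hypervisor filled.
 */
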
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

/* Clear the dirty state of [start, start + length) for every client. */
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

/*
 * Move the dirty-for-migration state of [start, start + length) into
 * @dest, clearing it in the global bitmap, and return the number of
 * pages that became newly dirty in @dest.
 */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* Fast path: the start address is aligned to a bitmap word, so
     * whole words can be moved with atomic_xchg(). */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

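/*
 * Illustrative sketch (assumed migration-style caller; migration_bitmap is
 * a hypothetical name for a bitmap indexed by target page number): fold
 * the current dirty state into the caller's bitmap and count new pages:
 *
 *     uint64_t newly_dirty =
 *         cpu_physical_memory_sync_dirty_bitmap(migration_bitmap,
 *                                               block->offset,
 *                                               block->used_length);
 */
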
void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */