/*
 * Page cache for QEMU
 * The cache is based on a hash of the page address
 *
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Orit Wasserman  <owasserm@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "qapi/qmp/qerror.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/host-utils.h"
#include "page_cache.h"

#ifdef DEBUG_CACHE
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "cache: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* a cached page is not replaced for at least CACHED_PAGE_LIFETIME cycles */
#define CACHED_PAGE_LIFETIME 2

typedef struct CacheItem CacheItem;

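/* one cache entry: the address of the page it holds, the cycle (it_age) in
 * which it was last stored or hit, and a copy of the page data (NULL until
 * the entry is first used) */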
struct CacheItem {
    uint64_t it_addr;
    uint64_t it_age;
    uint8_t *it_data;
};

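/* a direct-mapped table of max_num_items entries (a power of two), indexed
 * by a hash of the page address; num_items counts entries whose page data
 * has been allocated */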
struct PageCache {
    CacheItem *page_cache;
    size_t page_size;
    size_t max_num_items;
    size_t num_items;
};

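/*
 * Allocate a cache of new_size / page_size entries and mark every entry
 * invalid.  new_size must be at least one page and must describe a
 * power-of-two number of pages; on failure, set errp and return NULL.
 */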
PageCache *cache_init(int64_t new_size, size_t page_size, Error **errp)
{
    int64_t i;
    size_t num_pages = new_size / page_size;
    PageCache *cache;

    if (new_size < page_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than one target page size");
        return NULL;
    }

    /* the cache must hold a power-of-two number of pages */
    if (!is_power_of_2(num_pages)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is not a power of two number of pages");
        return NULL;
    }

    /* We prefer not to abort if there is no memory */
    cache = g_try_malloc(sizeof(*cache));
    if (!cache) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "Failed to allocate cache");
        return NULL;
    }
    cache->page_size = page_size;
    cache->num_items = 0;
    cache->max_num_items = num_pages;

    DPRINTF("Setting cache buckets to %zu\n", cache->max_num_items);

    /* We prefer not to abort if there is no memory */
    cache->page_cache = g_try_malloc((cache->max_num_items) *
                                     sizeof(*cache->page_cache));
    if (!cache->page_cache) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "Failed to allocate page cache");
        g_free(cache);
        return NULL;
    }

    for (i = 0; i < cache->max_num_items; i++) {
        cache->page_cache[i].it_data = NULL;
        cache->page_cache[i].it_age = 0;
        cache->page_cache[i].it_addr = -1;
    }

    return cache;
}

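/*
 * Free every cached page and then the cache structure itself.
 */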
void cache_fini(PageCache *cache)
{
    int64_t i;

    g_assert(cache);
    g_assert(cache->page_cache);

    for (i = 0; i < cache->max_num_items; i++) {
        g_free(cache->page_cache[i].it_data);
    }

    g_free(cache->page_cache);
    cache->page_cache = NULL;
    g_free(cache);
}

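/*
 * Map a page address to its slot: divide by the page size to get the page
 * number, then mask with (max_num_items - 1), which works because the
 * number of items is a power of two.
 */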
static size_t cache_get_cache_pos(const PageCache *cache,
                                  uint64_t address)
{
    g_assert(cache->max_num_items);
    return (address / cache->page_size) & (cache->max_num_items - 1);
}

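/*
 * Return the cache entry that addr maps to; the entry may currently hold a
 * different page (or none), so callers must check it_addr themselves.
 */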
static CacheItem *cache_get_by_addr(const PageCache *cache, uint64_t addr)
{
    size_t pos;

    g_assert(cache);
    g_assert(cache->page_cache);

    pos = cache_get_cache_pos(cache, addr);

    return &cache->page_cache[pos];
}

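/*
 * Return the data buffer of the entry addr maps to (NULL if the entry has
 * never been filled); does not check that the entry actually holds addr.
 */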
uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
{
    return cache_get_by_addr(cache, addr)->it_data;
}

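/*
 * Return true if addr is present in the cache; a hit also refreshes the
 * entry's age so it survives for another CACHED_PAGE_LIFETIME cycles.
 */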
bool cache_is_cached(const PageCache *cache, uint64_t addr,
                     uint64_t current_age)
{
    CacheItem *it;

    it = cache_get_by_addr(cache, addr);

    if (it->it_addr == addr) {
        /* refresh it_age on a cache hit */
        it->it_age = current_age;
        return true;
    }
    return false;
}

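/*
 * Store a copy of pdata in the entry addr maps to.  Return -1 without
 * replacing the entry if it still holds a different page that was touched
 * less than CACHED_PAGE_LIFETIME cycles ago, or if allocating the page
 * buffer fails; return 0 on success.
 */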
int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata,
                 uint64_t current_age)
{
    CacheItem *it;

    /* find the entry this address maps to */
    it = cache_get_by_addr(cache, addr);

    if (it->it_data && it->it_addr != addr &&
        it->it_age + CACHED_PAGE_LIFETIME > current_age) {
        /* the cached page is still fresh, don't replace it */
        return -1;
    }
    /* allocate the page buffer on first use */
    if (!it->it_data) {
        it->it_data = g_try_malloc(cache->page_size);
        if (!it->it_data) {
            DPRINTF("Error allocating page\n");
            return -1;
        }
        cache->num_items++;
    }

    memcpy(it->it_data, pdata, cache->page_size);

    it->it_age = current_age;
    it->it_addr = addr;

    return 0;
}
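
/*
 * Illustrative usage sketch (not part of this file; the page size, cache
 * size and the addr/current_age/page_data variables below stand in for a
 * caller's own state): a caller creates one cache, then for each page
 * either consults the cached copy or replaces it.
 *
 *     Error *err = NULL;
 *     PageCache *cache = cache_init(64 * 1024 * 4096, 4096, &err);
 *
 *     if (cache && cache_is_cached(cache, addr, current_age)) {
 *         uint8_t *old = get_cached_data(cache, addr);
 *         ... compare or encode against old ...
 *     } else if (cache) {
 *         cache_insert(cache, addr, page_data, current_age);
 *     }
 *
 *     cache_fini(cache);
 */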