xref: /qemu/block/qcow2-cache.c (revision cf93980e)
/*
 * L2/refcount table cache for the QCOW2 format
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "block/block_int.h"
#include "qemu-common.h"
#include "qcow2.h"
#include "trace.h"

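/* One cache entry: a cluster-sized table buffer, the image offset it was
 * loaded from (0 if the slot is unused), a dirty flag, a hit counter used by
 * the replacement strategy, and a reference count of current users. */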
typedef struct Qcow2CachedTable {
    void*   table;
    int64_t offset;
    bool    dirty;
    int     cache_hits;
    int     ref;
} Qcow2CachedTable;

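/* A cache holds a fixed number of table entries. If 'depends' is set, that
 * other cache must be flushed before any dirty entry of this cache is written
 * back; 'depends_on_flush' additionally requires a flush of bs->file first. */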
struct Qcow2Cache {
    Qcow2CachedTable*       entries;
    struct Qcow2Cache*      depends;
    int                     size;
    bool                    depends_on_flush;
};

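/* Allocate a cache with num_tables entries, each backed by a cluster-sized
 * buffer suitable for block I/O. */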
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2Cache *c;
    int i;

    c = g_malloc0(sizeof(*c));
    c->size = num_tables;
    c->entries = g_malloc0(sizeof(*c->entries) * num_tables);

    for (i = 0; i < c->size; i++) {
        c->entries[i].table = qemu_blockalign(bs, s->cluster_size);
    }

    return c;
}

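/* Free a cache and its table buffers. All entries must be unreferenced. */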
int qcow2_cache_destroy(BlockDriverState* bs, Qcow2Cache *c)
{
    int i;

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        qemu_vfree(c->entries[i].table);
    }

    g_free(c->entries);
    g_free(c);

    return 0;
}

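/* Flush the cache that c depends on, then clear the dependency. */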
static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret;

    ret = qcow2_cache_flush(bs, c->depends);
    if (ret < 0) {
        return ret;
    }

    c->depends = NULL;
    c->depends_on_flush = false;

    return 0;
}

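/* Write a single dirty entry back to the image file, honouring any cache or
 * flush dependency and running the metadata overlap check first. */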
static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
{
    BDRVQcowState *s = bs->opaque;
    int ret = 0;

    if (!c->entries[i].dirty || !c->entries[i].offset) {
        return 0;
    }

    trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
                                  c == s->l2_table_cache, i);

    if (c->depends) {
        ret = qcow2_cache_flush_dependency(bs, c);
    } else if (c->depends_on_flush) {
        ret = bdrv_flush(bs->file);
        if (ret >= 0) {
            c->depends_on_flush = false;
        }
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        ret = qcow2_pre_write_overlap_check(bs,
                QCOW2_OL_DEFAULT & ~QCOW2_OL_REFCOUNT_BLOCK,
                c->entries[i].offset, s->cluster_size);
    } else if (c == s->l2_table_cache) {
        ret = qcow2_pre_write_overlap_check(bs,
                QCOW2_OL_DEFAULT & ~QCOW2_OL_ACTIVE_L2,
                c->entries[i].offset, s->cluster_size);
    } else {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
                c->entries[i].offset, s->cluster_size);
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
    } else if (c == s->l2_table_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
    }

    ret = bdrv_pwrite(bs->file, c->entries[i].offset, c->entries[i].table,
        s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    c->entries[i].dirty = false;

    return 0;
}

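/* Write back all dirty entries and then flush the underlying file. */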
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
{
    BDRVQcowState *s = bs->opaque;
    int result = 0;
    int ret;
    int i;

    trace_qcow2_cache_flush(qemu_coroutine_self(), c == s->l2_table_cache);

    for (i = 0; i < c->size; i++) {
        ret = qcow2_cache_entry_flush(bs, c, i);
        if (ret < 0 && result != -ENOSPC) {
            result = ret;
        }
    }

    if (result == 0) {
        ret = bdrv_flush(bs->file);
        if (ret < 0) {
            result = ret;
        }
    }

    return result;
}

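/* Make c depend on 'dependency': the latter is flushed before any entry of c
 * is written back. An existing, different dependency is flushed first. */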
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
    Qcow2Cache *dependency)
{
    int ret;

    if (dependency->depends) {
        ret = qcow2_cache_flush_dependency(bs, dependency);
        if (ret < 0) {
            return ret;
        }
    }

    if (c->depends && (c->depends != dependency)) {
        ret = qcow2_cache_flush_dependency(bs, c);
        if (ret < 0) {
            return ret;
        }
    }

    c->depends = dependency;
    return 0;
}

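/* Require a flush of bs->file before the next writeback from this cache. */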
void qcow2_cache_depends_on_flush(Qcow2Cache *c)
{
    c->depends_on_flush = true;
}

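/* Pick an unreferenced entry with the fewest cache hits for replacement,
 * halving all hit counters on the way so that recent hits weigh more. */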
static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
{
    int i;
    int min_count = INT_MAX;
    int min_index = -1;


    for (i = 0; i < c->size; i++) {
        if (c->entries[i].ref) {
            continue;
        }

        if (c->entries[i].cache_hits < min_count) {
            min_index = i;
            min_count = c->entries[i].cache_hits;
        }

        /* Give newer hits priority */
        /* TODO Check how to optimize the replacement strategy */
        c->entries[i].cache_hits /= 2;
    }

    if (min_index == -1) {
        /* This can't happen in current synchronous code, but leave the check
         * here as a reminder for whoever starts using AIO with the cache */
        abort();
    }
    return min_index;
}

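/* Look up the table at 'offset' in the cache. On a miss, pick a victim entry,
 * write it back if dirty and optionally read the new table from disk. The
 * returned table is referenced and must be released with qcow2_cache_put(). */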
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    uint64_t offset, void **table, bool read_from_disk)
{
    BDRVQcowState *s = bs->opaque;
    int i;
    int ret;

    trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache,
                          offset, read_from_disk);

    /* Check if the table is already cached */
    for (i = 0; i < c->size; i++) {
        if (c->entries[i].offset == offset) {
            goto found;
        }
    }

    /* If not, write a table back and replace it */
    i = qcow2_cache_find_entry_to_replace(c);
    trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
                                        c == s->l2_table_cache, i);
    if (i < 0) {
        return i;
    }

    ret = qcow2_cache_entry_flush(bs, c, i);
    if (ret < 0) {
        return ret;
    }

    trace_qcow2_cache_get_read(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);
    c->entries[i].offset = 0;
    if (read_from_disk) {
        if (c == s->l2_table_cache) {
            BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
        }

        ret = bdrv_pread(bs->file, offset, c->entries[i].table, s->cluster_size);
        if (ret < 0) {
            return ret;
        }
    }

    /* Give the table some hits for the start so that it won't be replaced
     * immediately. The number 32 is completely arbitrary. */
    c->entries[i].cache_hits = 32;
    c->entries[i].offset = offset;

    /* And return the right table */
found:
    c->entries[i].cache_hits++;
    c->entries[i].ref++;
    *table = c->entries[i].table;

    trace_qcow2_cache_get_done(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);

    return 0;
}

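/* Get a table from the cache, reading it from disk on a miss. */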
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, true);
}

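/* Get a cache entry for 'offset' without reading from disk, e.g. for tables
 * that the caller is about to initialise itself. */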
int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, false);
}

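/* Release a table obtained with qcow2_cache_get() or qcow2_cache_get_empty()
 * and clear the caller's pointer. Returns -ENOENT if the table is not in the
 * cache. */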
int qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == *table) {
            goto found;
        }
    }
    return -ENOENT;

found:
    c->entries[i].ref--;
    *table = NULL;

    assert(c->entries[i].ref >= 0);
    return 0;
}

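/* Mark a cached table dirty so that it is written back on the next flush. */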
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == table) {
            goto found;
        }
    }
    abort();

found:
    c->entries[i].dirty = true;
}