/* block/qcow2-cluster.c @ 73f5e313 */
/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"

int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

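/*
 * A worked example of the growth heuristic above: starting from a single
 * entry, (n * 3 + 1) / 2 yields the sequence 1, 2, 3, 5, 8, 12, 18, ...,
 * i.e. roughly a 1.5x increase per step, so repeatedly growing an image
 * needs only logarithmically many L1 rewrites.
 */
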
/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success and *l2_table points to the table, or -errno if
 * the read from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

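/*
 * For example, with L1_ENTRIES_PER_SECTOR == 64, updating l1_index 70
 * computes l1_start_index = 70 & ~63 = 64 and rewrites the whole 512-byte
 * sector holding entries 64..127, converted to big-endian on the fly.
 */
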
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_offset,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    return 0;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}

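/*
 * Note the update ordering in l2_allocate(): the new L2 table is written
 * and flushed before the L1 entry is redirected to it, so a crash in
 * between leaves the on-disk L1 entry pointing at the old, still valid
 * table; the error path likewise restores the old L1 entry in memory.
 */
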
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + (uint64_t) i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while(nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}

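/*
 * Example for count_contiguous_clusters(), assuming 64 KiB clusters and
 * mask == QCOW_OFLAG_COPIED: L2 entries 0x50000 | QCOW_OFLAG_COPIED,
 * 0x60000 | QCOW_OFLAG_COPIED, 0x80000 give a run of 2 -- the third entry
 * breaks contiguity because 0x50000 + 2 * 0x10000 != 0x80000.
 */
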
/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

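/*
 * In the IV scheme above, each 512-byte sector is AES-CBC processed
 * independently with a 16-byte IV whose first 8 bytes hold the
 * little-endian sector number and whose remaining 8 bytes are zero;
 * decryption (enc == 0) reconstructs the same IV per sector.
 */
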
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                iov.iov_base, iov.iov_base, n, 1,
                &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns 0 if the offset is found, -errno otherwise.
 */

int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!*cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    *cluster_offset &= ~QCOW_OFLAG_COPIED;
    return 0;
}

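/*
 * Worked example for the lookup above, assuming the default 64 KiB
 * clusters (cluster_bits = 16, cluster_sectors = 128) and 8-byte L2
 * entries (l2_bits = 13, l2_size = 8192): l1_bits = 13 + 16 = 29, so a
 * guest offset of 0x23456789 decomposes into l1_index = offset >> 29 = 1,
 * l2_index = (offset >> 16) & 8191 = 0x345, and index_in_cluster =
 * (offset >> 9) & 127 = 51.
 */
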
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 0;
}

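/*
 * Background on the QCOW_OFLAG_COPIED check above: the flag in an L1
 * entry means the referenced L2 table has refcount 1 and may be updated
 * in place; when it is clear (e.g. after a snapshot) the table may be
 * shared, so get_cluster_table() goes through l2_allocate() to copy it
 * before any modification.
 */
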
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful, 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return 0;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    if (cluster_offset)
        qcow2_free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

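/*
 * Layout of the compressed cluster descriptor built above, assuming the
 * default 64 KiB clusters: csize_shift = 62 - (cluster_bits - 8) = 54,
 * so bits 0..53 hold the host offset of the compressed data, bits 54..61
 * hold the number of additional 512-byte sectors it spans, and bit 62 is
 * the QCOW_OFLAG_COMPRESSED flag.
 */
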
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;
    uint64_t cluster_offset = m->cluster_offset;
    bool cow = false;

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

    /*
     * Update L2 table.
     *
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (cow) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs,
                be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

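/*
 * COW example for the function above, with 128-sector (64 KiB) clusters:
 * a write covering sectors 10..17 of a newly allocated cluster has
 * m->n_start = 10 and m->nb_available = 18, so the first copy_sectors()
 * call fills sectors 0..9 from the old data, and the second (with
 * end = 18 & ~127 = 0) fills sectors 18..127 up to the end of the last
 * touched cluster.
 */
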
/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and the
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table;
    int64_t cluster_offset;
    unsigned int nb_clusters, i = 0;
    QCowL2Meta *old_alloc;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return ret;
    }

again:
    nb_clusters = size_to_clusters(s, n_end << 9);

    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        m->nb_clusters = 0;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);
        if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) {
            break;
        }

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);
        if (i >= nb_clusters) {
            break;
        }

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    assert(i <= nb_clusters);
    nb_clusters = i;

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = offset >> s->cluster_bits;
        uint64_t end = start + nb_clusters;
        uint64_t old_start = old_alloc->offset >> s->cluster_bits;
        uint64_t old_end = old_start + old_alloc->nb_clusters;

        if (end < old_start || start > old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                nb_clusters = old_start - start;
            } else {
                nb_clusters = 0;
            }

            if (nb_clusters == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                goto again;
            }
        }
    }

    if (!nb_clusters) {
        abort();
    }

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

    QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);

    /* allocate a new cluster */

    cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);
    if (cluster_offset < 0) {
        ret = cluster_offset;
        goto fail;
    }

out:
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto fail_put;
    }

    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);
    m->cluster_offset = cluster_offset;

    *num = m->nb_available - n_start;

    return 0;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
fail_put:
    QLIST_REMOVE(m, next_in_flight);
    return ret;
}

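/*
 * Example of the in-flight overlap handling above: if this request wants
 * clusters 10..14 (start = 10, nb_clusters = 5) while another allocation
 * covers clusters 12..13, the request is truncated to nb_clusters = 2;
 * had it started inside the running range, nb_clusters would drop to 0
 * and the coroutine would sleep on dependent_requests, then retry at
 * the again label.
 */
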
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

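/*
 * A note on the zlib usage above: inflateInit2() with a negative
 * windowBits (-12) selects a raw deflate stream with no zlib header,
 * which is how qcow2 stores compressed clusters. Z_BUF_ERROR is
 * presumably tolerated because the input length is sector-granular and
 * may include trailing bytes past the end of the actual compressed
 * stream; the out_len == out_buf_size check still enforces that a full
 * cluster was produced.
 */
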
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t l2_offset, *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        old_offset &= ~QCOW_OFLAG_COPIED;

        if (old_offset == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}
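
/*
 * The rounding above means only whole clusters are ever discarded. For
 * example, with 64 KiB clusters, a discard of bytes 0x18000..0x38000 is
 * narrowed to the range [0x20000, 0x30000), i.e. exactly one cluster;
 * a request smaller than one aligned cluster discards nothing and
 * returns 0.
 */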