/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"

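/*
 * Grows the L1 table so that it can hold at least min_size entries.  Unless
 * exact_size is set, the size is bumped by roughly 1.5x per step to amortise
 * repeated growth (e.g. 1 -> 2 -> 3 -> 5 -> 8 -> ...).  The new table is
 * written to freshly allocated clusters and the image header is updated to
 * point at it before the old table is freed.
 */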
int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_mallocz(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_free(new_l1_table);
        return new_l1_table_offset;
    }
    bdrv_flush(bs->file);

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
fail:
    qemu_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

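/*
 * Drops every entry from the in-memory L2 table cache.  Used after an error
 * may have left the cache contents inconsistent with what is on disk.
 */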
void qcow2_l2_cache_reset(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
}

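/*
 * Picks the cache slot to (re)use: the entry with the lowest hit count.
 */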
static inline int l2_cache_new_entry(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint32_t min_count;
    int min_index, i;

    /* find a new entry in the least used one */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    return min_index;
}

/*
 * seek_l2_table
 *
 * Looks up l2_offset in the L2 cache.
 * If it is not found, NULL is returned.
 * If it is found, the hit count of the entry is incremented (and all
 * counters are halved if the count would overflow), and a pointer to the
 * cached L2 table is returned.
 */

static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
{
    int i, j;

    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (l2_offset == s->l2_cache_offsets[i]) {
            /* increment the hit count */
            if (++s->l2_cache_counts[i] == 0xffffffff) {
                for(j = 0; j < L2_CACHE_SIZE; j++) {
                    s->l2_cache_counts[j] >>= 1;
                }
            }
            return s->l2_cache + (i << s->l2_bits);
        }
    }
    return NULL;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 and sets *l2_table to point at the (cached) table on success,
 * or a negative errno value if the read from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    int ret;

    /* seek if the table for the given offset is in the cache */

    *l2_table = seek_l2_table(s, l2_offset);
    if (*l2_table != NULL) {
        return 0;
    }

    /* not found: load a new entry in the least used one */

    min_index = l2_cache_new_entry(bs);
    *l2_table = s->l2_cache + (min_index << s->l2_bits);

    BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
    ret = bdrv_pread(bs->file, l2_offset, *l2_table,
        s->l2_size * sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return 0;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }
    bdrv_flush(bs->file);

    /* allocate a new entry in the l2 cache */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = bdrv_pread(bs->file, old_l2_offset, l2_table,
            s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            goto fail;
        }
    }
    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);
    ret = bdrv_pwrite_sync(bs->file, l2_offset, l2_table,
        s->l2_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    /* update the l2 cache entry */

    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    *table = l2_table;
    return 0;

fail:
    s->l1_table[l1_index] = old_l2_offset;
    qcow2_l2_cache_reset(bs);
    return ret;
}

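/*
 * Returns how many consecutive L2 entries, starting at index 'start', map to
 * clusters that are physically contiguous with the cluster described by
 * l2_table[0] (flag bits in 'mask' are ignored).  Returns 0 if l2_table[0]
 * is unallocated.
 */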
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + (uint64_t) i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

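/*
 * Counts the consecutive unallocated (all-zero) L2 entries at the start of
 * l2_table, up to nb_clusters.
 */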
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while(nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}

/* The crypt function is compatible with the Linux cryptoloop algorithm for
   images smaller than 4 GB (the IV is simply the little-endian sector
   number). NOTE: out_buf == in_buf is supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

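/*
 * Synchronous read of nb_sectors guest sectors starting at sector_num,
 * taking the cluster mapping into account: unallocated clusters are served
 * from the backing file (or zero-filled), compressed clusters are
 * decompressed, and encrypted data is decrypted.  Used for copy-on-write.
 */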
static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;
    struct iovec iov;
    QEMUIOVector qiov;

    while (nb_sectors > 0) {
        n = nb_sectors;

        ret = qcow2_get_cluster_offset(bs, sector_num << 9, &n,
            &cluster_offset);
        if (ret < 0) {
            return ret;
        }

        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                iov.iov_base = buf;
                iov.iov_len = n * 512;
                qemu_iovec_init_external(&qiov, &iov, 1);

                n1 = qcow2_backing_read1(bs->backing_hd, &qiov, sector_num, n);
                if (n1 > 0) {
                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING);
                    ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
                    if (ret < 0)
                        return -1;
                }
            } else {
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (qcow2_decompress_cluster(bs, cluster_offset) < 0)
                return -1;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            BLKDBG_EVENT(bs->file, BLKDBG_READ);
            ret = bdrv_pread(bs->file, cluster_offset + index_in_cluster * 512, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                      &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}

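/*
 * Copy-on-write helper: reads sectors [n_start, n_end) of the guest cluster
 * starting at sector start_sect through the normal read path and writes them
 * into the newly allocated cluster at cluster_offset, re-encrypting the data
 * first if the image is encrypted.
 */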
static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;
    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
    ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              s->cluster_data,
                              s->cluster_data, n, 1,
                              &s->aes_encrypt_key);
    }
    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_write(bs->file, (cluster_offset >> 9) + n_start,
                     s->cluster_data, n);
    if (ret < 0)
        return ret;
    return 0;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns 0 on success, -errno on error.
 */

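/*
 * Guest offsets are decomposed as
 *   l1_index = offset >> (l2_bits + cluster_bits)
 *   l2_index = (offset >> cluster_bits) & (l2_size - 1)
 * so each L1 entry covers 1 << (l2_bits + cluster_bits) bytes.  For example
 * (assuming the default 64 KiB clusters, i.e. cluster_bits = 16 and an L2
 * table of 8192 entries), one L2 table maps 8192 * 64 KiB = 512 MiB of
 * guest address space.
 */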
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!*cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    *cluster_offset &= ~QCOW_OFLAG_COPIED;
    return 0;
}

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * The l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        if (l2_offset)
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 0;
}

/*
|
598 |
* alloc_compressed_cluster_offset
|
599 |
*
|
600 |
* For a given offset of the disk image, return cluster offset in
|
601 |
* qcow2 file.
|
602 |
*
|
603 |
* If the offset is not found, allocate a new compressed cluster.
|
604 |
*
|
605 |
* Return the cluster offset if successful,
|
606 |
* Return 0, otherwise.
|
607 |
*
|
608 |
*/
|
609 |
|
610 |
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, |
611 |
uint64_t offset, |
612 |
int compressed_size)
|
613 |
{ |
614 |
BDRVQcowState *s = bs->opaque; |
615 |
int l2_index, ret;
|
616 |
uint64_t l2_offset, *l2_table; |
617 |
int64_t cluster_offset; |
618 |
int nb_csectors;
|
619 |
|
620 |
ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index); |
621 |
if (ret < 0) { |
622 |
return 0; |
623 |
} |
624 |
|
625 |
cluster_offset = be64_to_cpu(l2_table[l2_index]); |
626 |
if (cluster_offset & QCOW_OFLAG_COPIED)
|
627 |
return cluster_offset & ~QCOW_OFLAG_COPIED;
|
628 |
|
629 |
if (cluster_offset)
|
630 |
qcow2_free_any_clusters(bs, cluster_offset, 1);
|
631 |
|
632 |
cluster_offset = qcow2_alloc_bytes(bs, compressed_size); |
633 |
if (cluster_offset < 0) { |
634 |
return 0; |
635 |
} |
636 |
|
637 |
nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) - |
638 |
(cluster_offset >> 9);
|
639 |
|
640 |
cluster_offset |= QCOW_OFLAG_COMPRESSED | |
641 |
((uint64_t)nb_csectors << s->csize_shift); |
642 |
|
643 |
/* update L2 table */
|
644 |
|
645 |
/* compressed clusters never have the copied flag */
|
646 |
|
647 |
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); |
648 |
l2_table[l2_index] = cpu_to_be64(cluster_offset); |
649 |
if (bdrv_pwrite_sync(bs->file,
|
650 |
l2_offset + l2_index * sizeof(uint64_t),
|
651 |
l2_table + l2_index, |
652 |
sizeof(uint64_t)) < 0) |
653 |
return 0; |
654 |
|
655 |
return cluster_offset;
|
656 |
} |
657 |
|
658 |
/*
|
659 |
* Write L2 table updates to disk, writing whole sectors to avoid a
|
660 |
* read-modify-write in bdrv_pwrite
|
661 |
*/
|
662 |
#define L2_ENTRIES_PER_SECTOR (512 / 8) |
663 |
static int write_l2_entries(BlockDriverState *bs, uint64_t *l2_table, |
664 |
uint64_t l2_offset, int l2_index, int num) |
665 |
{ |
666 |
int l2_start_index = l2_index & ~(L1_ENTRIES_PER_SECTOR - 1); |
667 |
int start_offset = (8 * l2_index) & ~511; |
668 |
int end_offset = (8 * (l2_index + num) + 511) & ~511; |
669 |
size_t len = end_offset - start_offset; |
670 |
int ret;
|
671 |
|
672 |
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE); |
673 |
ret = bdrv_pwrite(bs->file, l2_offset + start_offset, |
674 |
&l2_table[l2_start_index], len); |
675 |
if (ret < 0) { |
676 |
return ret;
|
677 |
} |
678 |
|
679 |
return 0; |
680 |
} |
681 |
|
682 |
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
|
683 |
{ |
684 |
BDRVQcowState *s = bs->opaque; |
685 |
int i, j = 0, l2_index, ret; |
686 |
uint64_t *old_cluster, start_sect, l2_offset, *l2_table; |
687 |
uint64_t cluster_offset = m->cluster_offset; |
688 |
|
689 |
if (m->nb_clusters == 0) |
690 |
return 0; |
691 |
|
692 |
old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));
|
693 |
|
694 |
/* copy content of unmodified sectors */
|
695 |
start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9; |
696 |
if (m->n_start) {
|
697 |
ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
|
698 |
if (ret < 0) |
699 |
goto err;
|
700 |
} |
701 |
|
702 |
if (m->nb_available & (s->cluster_sectors - 1)) { |
703 |
uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
|
704 |
ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
|
705 |
m->nb_available - end, s->cluster_sectors); |
706 |
if (ret < 0) |
707 |
goto err;
|
708 |
} |
709 |
|
710 |
/* update L2 table */
|
711 |
ret = get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index); |
712 |
if (ret < 0) { |
713 |
goto err;
|
714 |
} |
715 |
|
716 |
for (i = 0; i < m->nb_clusters; i++) { |
717 |
/* if two concurrent writes happen to the same unallocated cluster
|
718 |
* each write allocates separate cluster and writes data concurrently.
|
719 |
* The first one to complete updates l2 table with pointer to its
|
720 |
* cluster the second one has to do RMW (which is done above by
|
721 |
* copy_sectors()), update l2 table with its cluster pointer and free
|
722 |
* old cluster. This is what this loop does */
|
723 |
if(l2_table[l2_index + i] != 0) |
724 |
old_cluster[j++] = l2_table[l2_index + i]; |
725 |
|
726 |
l2_table[l2_index + i] = cpu_to_be64((cluster_offset + |
727 |
(i << s->cluster_bits)) | QCOW_OFLAG_COPIED); |
728 |
} |
729 |
|
730 |
/*
|
731 |
* Before we update the L2 table to actually point to the new cluster, we
|
732 |
* need to be sure that the refcounts have been increased and COW was
|
733 |
* handled.
|
734 |
*/
|
735 |
bdrv_flush(bs->file); |
736 |
|
737 |
ret = write_l2_entries(bs, l2_table, l2_offset, l2_index, m->nb_clusters); |
738 |
if (ret < 0) { |
739 |
qcow2_l2_cache_reset(bs); |
740 |
goto err;
|
741 |
} |
742 |
|
743 |
/*
|
744 |
* If this was a COW, we need to decrease the refcount of the old cluster.
|
745 |
* Also flush bs->file to get the right order for L2 and refcount update.
|
746 |
*/
|
747 |
if (j != 0) { |
748 |
bdrv_flush(bs->file); |
749 |
for (i = 0; i < j; i++) { |
750 |
qcow2_free_any_clusters(bs, |
751 |
be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
|
752 |
} |
753 |
} |
754 |
|
755 |
ret = 0;
|
756 |
err:
|
757 |
qemu_free(old_cluster); |
758 |
return ret;
|
759 |
} |
760 |
|
/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in qcow2 file.
 * If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0,
 * m->depends_on is set to NULL and the other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. This may be 0 if the request
 * conflicts with another write request in flight; in this case, m->depends_on
 * is set and the remaining fields of m are meaningless.
 *
 * If m->nb_clusters is non-zero, the other fields of m are valid and contain
 * information about the first allocated cluster.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table;
    int64_t cluster_offset;
    unsigned int nb_clusters, i = 0;
    QCowL2Meta *old_alloc;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return ret;
    }

    nb_clusters = size_to_clusters(s, n_end << 9);

    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        m->nb_clusters = 0;
        m->depends_on = NULL;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);
        if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) {
            break;
        }

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);
        if (i >= nb_clusters) {
            break;
        }

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    assert(i <= nb_clusters);
    nb_clusters = i;

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t end_offset = offset + nb_clusters * s->cluster_size;
        uint64_t old_offset = old_alloc->offset;
        uint64_t old_end_offset = old_alloc->offset +
            old_alloc->nb_clusters * s->cluster_size;

        if (end_offset < old_offset || offset > old_end_offset) {
            /* No intersection */
        } else {
            if (offset < old_offset) {
                /* Stop at the start of a running allocation */
                nb_clusters = (old_offset - offset) >> s->cluster_bits;
            } else {
                nb_clusters = 0;
            }

            if (nb_clusters == 0) {
                /* Set dependency and wait for a callback */
                m->depends_on = old_alloc;
                m->nb_clusters = 0;
                *num = 0;
                return 0;
            }
        }
    }

    if (!nb_clusters) {
        abort();
    }

    QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);

    /* allocate a new cluster */

    cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);
    if (cluster_offset < 0) {
        QLIST_REMOVE(m, next_in_flight);
        return cluster_offset;
    }

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

out:
    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);
    m->cluster_offset = cluster_offset;

    *num = m->nb_available - n_start;

    return 0;
}

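/*
 * Inflates a raw deflate stream of buf_size bytes into out_buf.  Fails
 * (returns -1) unless exactly out_buf_size bytes are produced.
 */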
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

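/*
 * Decompresses the compressed cluster described by the L2 entry
 * cluster_offset into s->cluster_cache.  The result is cached, so repeated
 * reads from the same compressed cluster only decompress it once.
 */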
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return -1;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}