block/qcow2-cluster.c @ f214978a
/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"

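/*
 * qcow2_grow_l1_table
 *
 * Grows the L1 table to at least min_size entries (the size is increased
 * by roughly 1.5x until it is large enough). The enlarged table is written
 * to newly allocated clusters, the image header is pointed at it and the
 * clusters of the old table are freed.
 */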
int qcow2_grow_l1_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    uint64_t new_l1_table_offset;
    uint8_t data[12];

    new_l1_size = s->l1_size;
    if (min_size <= new_l1_size)
        return 0;
    while (min_size > new_l1_size) {
        new_l1_size = (new_l1_size * 3 + 1) / 2;
    }
#ifdef DEBUG_ALLOC2
    printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_mallocz(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);

    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret != new_l1_size2)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_size), data,
                sizeof(data)) != sizeof(data))
        goto fail;
    qemu_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    qemu_free(s->l1_table);
    return -EIO;
}

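/* Invalidate all entries of the in-memory L2 table cache. */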
void qcow2_l2_cache_reset(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
}

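/*
 * Returns the index of the L2 cache entry with the smallest hit counter,
 * i.e. the slot that the caller should evict and reuse next.
 */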
static inline int l2_cache_new_entry(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint32_t min_count;
    int min_index, i;

    /* find a new entry in the least used one */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    return min_index;
}

/*
 * seek_l2_table
 *
 * seek l2_offset in the l2_cache table
 * if not found, return NULL,
 * if found,
 *   increments the l2 cache hit count of the entry,
 *   if counter overflow, divide by two all counters
 *   return the pointer to the l2 cache entry
 *
 */

static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
{
    int i, j;

    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (l2_offset == s->l2_cache_offsets[i]) {
            /* increment the hit count */
            if (++s->l2_cache_counts[i] == 0xffffffff) {
                for(j = 0; j < L2_CACHE_SIZE; j++) {
                    s->l2_cache_counts[j] >>= 1;
                }
            }
            return s->l2_cache + (i << s->l2_bits);
        }
    }
    return NULL;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns a pointer to the L2 table on success, or NULL if the read from
 * the image file failed.
 */

static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t *l2_table;

    /* seek if the table for the given offset is in the cache */

    l2_table = seek_l2_table(s, l2_offset);
    if (l2_table != NULL)
        return l2_table;

    /* not found: load a new entry in the least used one */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);
    if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
        s->l2_size * sizeof(uint64_t))
        return NULL;
    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return l2_table;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BDRVQcowState *s, int l1_index)
{
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    if (bdrv_pwrite(s->hd, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf)) != sizeof(buf))
    {
        return -1;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t old_l2_offset;
    uint64_t *l2_table, l2_offset;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));

    /* update the L1 entry */

    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    if (write_l1_entry(s, l1_index) < 0) {
        return NULL;
    }

    /* allocate a new entry in the l2 cache */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        /* if there was an old l2 table, read it from the disk */
        if (bdrv_pread(s->hd, old_l2_offset,
                       l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return NULL;
    }
    /* write the l2 table to the file */
    if (bdrv_pwrite(s->hd, l2_offset,
                    l2_table, s->l2_size * sizeof(uint64_t)) !=
        s->l2_size * sizeof(uint64_t))
        return NULL;

    /* update the l2 cache entry */

    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return l2_table;
}

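/*
 * Counts how many consecutive L2 entries, beginning at index 'start', map to
 * clusters that are physically contiguous with the cluster described by
 * l2_table[0]; the flag bits selected by 'mask' are ignored in the
 * comparison. Returns 0 if the first entry is unallocated.
 */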
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

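/* Counts how many of the first nb_clusters L2 entries are unallocated (zero). */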
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while(nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}


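/*
 * Reads nb_sectors sectors of guest data starting at sector_num into buf,
 * falling back to the backing file (or zero filling) for unallocated
 * clusters and handling compressed and encrypted clusters.
 */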
static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        n = nb_sectors;
        cluster_offset = qcow2_get_cluster_offset(bs, sector_num << 9, &n);
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing_hd, sector_num, buf, n);
                if (n1 > 0) {
                    ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
                    if (ret < 0)
                        return -1;
                }
            } else {
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (qcow2_decompress_cluster(s, cluster_offset) < 0)
                return -1;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}

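/*
 * Copy-on-write helper: reads sectors [n_start, n_end) of the guest cluster
 * beginning at sector start_sect and writes them (re-encrypted if necessary)
 * into the newly allocated cluster at cluster_offset.
 */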
static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;
    ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        s->cluster_data,
                        s->cluster_data, n, 1,
                        &s->aes_encrypt_key);
    }
    ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start,
                     s->cluster_data, n);
    if (ret < 0)
        return ret;
    return 0;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * on entry, *num is the number of contiguous clusters we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous clusters we can read.
 *
 * Returns the cluster offset if it is found, 0 otherwise.
 *
 */

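/*
 * Illustrative note (not part of the original source): a guest offset is
 * split into an L1 index, an L2 index and a position within the cluster.
 * For example, with 64 KiB clusters (cluster_bits = 16) each L2 table holds
 * cluster_size / 8 = 8192 entries, so l2_bits = 13 and:
 *
 *   l1_index         = offset >> (l2_bits + cluster_bits)        // bits 63..29
 *   l2_index         = (offset >> cluster_bits) & (l2_size - 1)  // bits 28..16
 *   index_in_cluster = (offset >> 9) & (cluster_sectors - 1)     // bits 15..9
 */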
uint64_t qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                                  int *num)
{
    BDRVQcowState *s = bs->opaque;
    int l1_index, l2_index;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int l1_bits, c;
    int index_in_cluster, nb_available, nb_needed, nb_clusters;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    l2_table = l2_load(bs, l2_offset);
    if (l2_table == NULL)
        return 0;

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return cluster_offset & ~QCOW_OFLAG_COPIED;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 */

static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    int l1_index, l2_index, ret;
    uint64_t l2_offset, *l2_table;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1);
        if (ret < 0)
            return 0;
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        l2_table = l2_load(bs, l2_offset);
        if (l2_table == NULL)
            return 0;
    } else {
        if (l2_offset)
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        l2_table = l2_allocate(bs, l1_index);
        if (l2_table == NULL)
            return 0;
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 1;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED)
        return cluster_offset & ~QCOW_OFLAG_COPIED;

    if (cluster_offset)
        qcow2_free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    if (bdrv_pwrite(s->hd,
                    l2_offset + l2_index * sizeof(uint64_t),
                    l2_table + l2_index,
                    sizeof(uint64_t)) != sizeof(uint64_t))
        return 0;

    return cluster_offset;
}

/*
 * Write L2 table updates to disk, writing whole sectors to avoid a
 * read-modify-write in bdrv_pwrite
 */
#define L2_ENTRIES_PER_SECTOR (512 / 8)
static int write_l2_entries(BDRVQcowState *s, uint64_t *l2_table,
    uint64_t l2_offset, int l2_index, int num)
{
    int l2_start_index = l2_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    int start_offset = (8 * l2_index) & ~511;
    int end_offset = (8 * (l2_index + num) + 511) & ~511;
    size_t len = end_offset - start_offset;

    if (bdrv_pwrite(s->hd, l2_offset + start_offset, &l2_table[l2_start_index],
        len) != len)
    {
        return -1;
    }

    return 0;
}

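/*
 * Links the clusters allocated by qcow2_alloc_cluster_offset() into the L2
 * table: performs copy-on-write for the head and tail of the write described
 * by 'm', updates the affected L2 entries and frees any clusters they
 * previously pointed to.
 */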
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
    QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        if (ret < 0)
            goto err;
    }

    ret = -EIO;
    /* update L2 table */
    if (!get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index))
        goto err;

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the l2 table with its
         * cluster pointer and free the old cluster. This is what this loop
         * does. */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    if (write_l2_entries(s, l2_table, l2_offset, l2_index, m->nb_clusters) < 0) {
        ret = -1;
        goto err;
    }

    for (i = 0; i < j; i++)
        qcow2_free_any_clusters(bs,
            be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);

    ret = 0;
err:
    qemu_free(old_cluster);
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 *
 */

uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
                                    uint64_t offset,
                                    int n_start, int n_end,
                                    int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_clusters, i = 0;
    QCowL2Meta *old_alloc;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    nb_clusters = size_to_clusters(s, n_end << 9);

    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        m->nb_clusters = 0;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);

        if(be64_to_cpu(l2_table[l2_index + i]))
            break;

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    nb_clusters = i;

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    LIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t end_offset = offset + nb_clusters * s->cluster_size;
        uint64_t old_offset = old_alloc->offset;
        uint64_t old_end_offset = old_alloc->offset +
            old_alloc->nb_clusters * s->cluster_size;

        if (end_offset < old_offset || offset > old_end_offset) {
            /* No intersection */
        } else {
            if (offset < old_offset) {
                /* Stop at the start of a running allocation */
                nb_clusters = (old_offset - offset) >> s->cluster_bits;
            } else {
                nb_clusters = 0;
            }

            if (nb_clusters == 0) {
                /* Set dependency and wait for a callback */
                m->depends_on = old_alloc;
                m->nb_clusters = 0;
                *num = 0;
                return 0;
            }
        }
    }

    if (!nb_clusters) {
        abort();
    }

    LIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);

    /* allocate a new cluster */

    cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

out:
    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);

    *num = m->nb_available - n_start;

    return cluster_offset;
}

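/*
 * Inflates a raw deflate stream (no zlib header, hence the negative window
 * bits passed to inflateInit2) of buf_size bytes into out_buf. Returns 0 on
 * success, -1 if decompression fails or does not produce exactly
 * out_buf_size bytes.
 */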
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

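/*
 * Ensures that the compressed cluster described by cluster_offset has been
 * decompressed into s->cluster_cache; a cluster that is already cached
 * (s->cluster_cache_offset) is not read again. Returns 0 on success, -1 on
 * error.
 */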
int qcow2_decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
{
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        ret = bdrv_read(s->hd, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return -1;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}