/* block/qcow2-cluster.c @ 737e150e */
/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

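/*
 * Illustrative note (not part of the original source): the growth rule
 * above, new = (new * 3 + 1) / 2, produces the sequence 1, 2, 3, 5, 8,
 * 12, 18, ... when starting from 1, i.e. roughly 1.5x per step, so a
 * series of small image extensions triggers only O(log n) L1 rewrites.
 */
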
/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the read from the image file failed.
 * On success, *l2_table points to the loaded table.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

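/*
 * Illustrative example (not part of the original source): with 8-byte
 * entries, L1_ENTRIES_PER_SECTOR is 512 / 8 = 64. Updating l1_index 70
 * therefore rounds l1_start_index down to 64 and rewrites the whole
 * 512-byte sector covering entries 64..127 in one bdrv_pwrite_sync().
 */
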
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK;
    uint64_t offset = be64_to_cpu(l2_table[0]) & mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return (i - start);
}

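/*
 * Clarifying note (not part of the original source): the base offset is
 * taken from l2_table[0] and entry i is compared against
 * base + i * cluster_size, so contiguity is measured relative to the
 * first entry of the slice; every caller in this file passes start == 0.
 * A hole, a reordered cluster or a change in the stop_flags bits ends
 * the run.
 */
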
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

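/*
 * Clarifying note (not part of the original source): each 512-byte
 * sector is encrypted as an independent AES-CBC run whose 16-byte IV is
 * the little-endian sector number in the first 8 bytes and zeroes in
 * the remaining 8, matching the cryptoloop scheme mentioned above.
 */
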
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;
}

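/*
 * Usage sketch (illustrative only, not part of the original source):
 *
 *     int num = 128;        // sectors we would like starting at offset
 *     uint64_t host_offset;
 *     int type = qcow2_get_cluster_offset(bs, offset, &num, &host_offset);
 *     // on return, type is a QCOW2_CLUSTER_* value (or -errno) and
 *     // num <= 128 is the length of the contiguous run of that type.
 */
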
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

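/*
 * Worked example (illustrative, assuming the default 64 kB clusters,
 * i.e. cluster_bits = 16 and l2_bits = 13): a guest offset is split as
 *
 *     l1_index = offset >> (13 + 16);           // 512 MB per L1 entry
 *     l2_index = (offset >> 16) & (8192 - 1);   // 64 kB per L2 entry
 *
 * e.g. offset 0x23456789 yields l1_index 1, l2_index 0x345, and byte
 * 0x6789 within the cluster.
 */
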
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful, or 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

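/*
 * Clarifying note (not part of the original source): nb_csectors as
 * computed above is the number of additional 512-byte sectors the
 * compressed data spans beyond its first sector;
 * qcow2_decompress_cluster() adds the 1 back when it reads the field
 * out of the descriptor again.
 */
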
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes the data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the l2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset >> s->cluster_bits;
        uint64_t end = start + *nb_clusters;
        uint64_t old_start = old_alloc->offset >> s->cluster_bits;
        uint64_t old_end = old_start + old_alloc->nb_clusters;

        if (end < old_start || start > old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                *nb_clusters = old_start - start;
            } else {
                *nb_clusters = 0;
            }

            if (*nb_clusters == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    if (!*nb_clusters) {
        abort();
    }

    return 0;
}

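/*
 * Clarifying note (not part of the original source): the intersection
 * test above is conservative. Because it uses 'end < old_start' and
 * 'start > old_end' rather than <=/>=, two cluster ranges that merely
 * touch are also treated as overlapping, which at worst makes a request
 * wait on an allocation it does not actually overlap.
 */
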
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    ret = handle_dependencies(bs, guest_offset, nb_clusters);
    if (ret < 0) {
        return ret;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret, sectors;
    uint64_t *l2_table;
    unsigned int nb_clusters, keep_clusters;
    uint64_t cluster_offset;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    /* Find L2 entry for the first involved cluster */
again:
    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
                      s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /*
     * Check how many clusters are already allocated and don't need COW, and how
     * many need a new allocation.
     */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], 0,
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);
        nb_clusters -= keep_clusters;
    } else {
        keep_clusters = 0;
        cluster_offset = 0;
    }

    if (nb_clusters > 0) {
        /* For the moment, overwrite compressed clusters one by one */
        uint64_t entry = be64_to_cpu(l2_table[l2_index + keep_clusters]);
        if (entry & QCOW_OFLAG_COMPRESSED) {
            nb_clusters = 1;
        } else {
            nb_clusters = count_cow_clusters(s, nb_clusters, l2_table,
                                             l2_index + keep_clusters);
        }
    }

    cluster_offset &= L2E_OFFSET_MASK;

    /*
     * The L2 table isn't used any more after this. As long as the cache works
     * synchronously, it's important to release it before calling
     * do_alloc_cluster_offset, which may yield if we need to wait for another
     * request to complete. If we still had the reference, we could use up the
     * whole cache with sleeping requests.
     */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* If there is something left to allocate, do that now */
    if (nb_clusters > 0) {
        uint64_t alloc_offset;
        uint64_t alloc_cluster_offset;
        uint64_t keep_bytes = keep_clusters * s->cluster_size;

        /* Calculate start and size of allocation */
        alloc_offset = offset + keep_bytes;

        if (keep_clusters == 0) {
            alloc_cluster_offset = 0;
        } else {
            alloc_cluster_offset = cluster_offset + keep_bytes;
        }

        /* Allocate, if necessary at a given offset in the image file */
        ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret == -EAGAIN) {
            goto again;
        } else if (ret < 0) {
            goto fail;
        }

        /* save info needed for meta data update */
        if (nb_clusters > 0) {
            /*
             * requested_sectors: Number of sectors from the start of the first
             * newly allocated cluster to the end of the (possibly shortened
             * before) write request.
             *
             * avail_sectors: Number of sectors from the start of the first
             * newly allocated to the end of the last newly allocated cluster.
             *
             * nb_sectors: The number of sectors from the start of the first
             * newly allocated cluster to the end of the area that the write
             * request actually writes to (excluding COW at the end)
             */
            int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
            int avail_sectors = nb_clusters
                                << (s->cluster_bits - BDRV_SECTOR_BITS);
            int alloc_n_start = keep_clusters == 0 ? n_start : 0;
            int nb_sectors = MIN(requested_sectors, avail_sectors);

            if (keep_clusters == 0) {
                cluster_offset = alloc_cluster_offset;
            }

            *m = g_malloc0(sizeof(**m));

            **m = (QCowL2Meta) {
                .alloc_offset   = alloc_cluster_offset,
                .offset         = alloc_offset & ~(s->cluster_size - 1),
                .nb_clusters    = nb_clusters,
                .nb_available   = nb_sectors,

                .cow_start = {
                    .offset     = 0,
                    .nb_sectors = alloc_n_start,
                },
                .cow_end = {
                    .offset     = nb_sectors * BDRV_SECTOR_SIZE,
                    .nb_sectors = avail_sectors - nb_sectors,
                },
            };
            qemu_co_queue_init(&(*m)->dependent_requests);
            QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
        }
    }

    /* Some cleanup work */
    sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
    if (sectors > n_end) {
        sectors = n_end;
    }

    assert(sectors > n_start);
    *num = sectors - n_start;
    *host_offset = cluster_offset;

    return 0;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

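/*
 * Worked example (illustrative, not part of the original source;
 * assumes 64 kB clusters, i.e. 128 sectors per cluster): a request with
 * n_start = 0 and n_end = 300 that finds one COPIED cluster keeps it
 * (keep_clusters = 1) and needs two more. With nb_clusters = 2 after
 * allocation, requested_sectors = 300 - 128 = 172, avail_sectors =
 * 2 * 128 = 256 and nb_sectors = 172; cow_start is then empty and
 * cow_end covers sectors 172..255 of the newly allocated area (all
 * counts relative to the first newly allocated cluster).
 */
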
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

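/*
 * Clarifying note (not part of the original source): the negative
 * windowBits value passed to inflateInit2() (-12) selects raw deflate
 * data without a zlib header, with a 4 kB window. Z_BUF_ERROR has to be
 * accepted alongside Z_STREAM_END because the input length is rounded
 * up to whole sectors and may contain trailing bytes beyond the
 * compressed stream; the out_len check still enforces that a full
 * cluster was produced.
 */
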
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

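/*
 * Clarifying note (not part of the original source): for a compressed
 * cluster the entry is replaced outright and the compressed data is
 * freed, while for other clusters QCOW_OFLAG_ZERO is merely OR'd in, so
 * any allocated host cluster stays allocated and simply reads as zeroes
 * until it is rewritten.
 */
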
int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}