root / block / qcow2-cluster.c @ 1d3afd64
History | View | Annotate | Download (35.4 kB)
1 | 45aba42f | Kevin Wolf | /*
|
---|---|---|---|
2 | 45aba42f | Kevin Wolf | * Block driver for the QCOW version 2 format
|
3 | 45aba42f | Kevin Wolf | *
|
4 | 45aba42f | Kevin Wolf | * Copyright (c) 2004-2006 Fabrice Bellard
|
5 | 45aba42f | Kevin Wolf | *
|
6 | 45aba42f | Kevin Wolf | * Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 | 45aba42f | Kevin Wolf | * of this software and associated documentation files (the "Software"), to deal
|
8 | 45aba42f | Kevin Wolf | * in the Software without restriction, including without limitation the rights
|
9 | 45aba42f | Kevin Wolf | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 | 45aba42f | Kevin Wolf | * copies of the Software, and to permit persons to whom the Software is
|
11 | 45aba42f | Kevin Wolf | * furnished to do so, subject to the following conditions:
|
12 | 45aba42f | Kevin Wolf | *
|
13 | 45aba42f | Kevin Wolf | * The above copyright notice and this permission notice shall be included in
|
14 | 45aba42f | Kevin Wolf | * all copies or substantial portions of the Software.
|
15 | 45aba42f | Kevin Wolf | *
|
16 | 45aba42f | Kevin Wolf | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 | 45aba42f | Kevin Wolf | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 | 45aba42f | Kevin Wolf | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
19 | 45aba42f | Kevin Wolf | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 | 45aba42f | Kevin Wolf | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 | 45aba42f | Kevin Wolf | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
22 | 45aba42f | Kevin Wolf | * THE SOFTWARE.
|
23 | 45aba42f | Kevin Wolf | */
|
24 | 45aba42f | Kevin Wolf | |
25 | 45aba42f | Kevin Wolf | #include <zlib.h> |
26 | 45aba42f | Kevin Wolf | |
27 | 45aba42f | Kevin Wolf | #include "qemu-common.h" |
28 | 45aba42f | Kevin Wolf | #include "block_int.h" |
29 | 45aba42f | Kevin Wolf | #include "block/qcow2.h" |
30 | 3cce16f4 | Kevin Wolf | #include "trace.h" |
31 | 45aba42f | Kevin Wolf | |
/*
 * Grows the in-memory and on-disk L1 table so that it holds at least
 * min_size entries.
 *
 * If exact_size is false, the new size is rounded up geometrically
 * (factor ~1.5) to amortize repeated growth; if true, exactly min_size
 * entries are allocated.
 *
 * Ordering matters for crash safety: the new table is fully written and
 * synced at a freshly allocated offset before the image header is updated
 * to point at it; only then is the old table freed.
 *
 * Returns 0 on success, -errno on failure.
 */
int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    /* Nothing to do if the table is already large enough */
    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    /* Allocate the new table rounded up to 512 bytes so whole sectors are
     * written below, and copy the existing entries over. */
    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    /* Make sure the refcount update for the new clusters reaches the disk
     * before the table that lives in them is referenced. */
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    /* The on-disk format is big endian; convert in place, write, then
     * convert back so the in-memory copy stays in host byte order. */
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    /* Update l1_size and l1_table_offset in the header with one synced
     * write (12 bytes: 4-byte size + 8-byte offset, big endian). */
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data,sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    /* Header now points at the new table; drop the old one */
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}
105 | 45aba42f | Kevin Wolf | |
106 | 45aba42f | Kevin Wolf | /*
|
107 | 45aba42f | Kevin Wolf | * l2_load
|
108 | 45aba42f | Kevin Wolf | *
|
109 | 45aba42f | Kevin Wolf | * Loads a L2 table into memory. If the table is in the cache, the cache
|
110 | 45aba42f | Kevin Wolf | * is used; otherwise the L2 table is loaded from the image file.
|
111 | 45aba42f | Kevin Wolf | *
|
112 | 45aba42f | Kevin Wolf | * Returns a pointer to the L2 table on success, or NULL if the read from
|
113 | 45aba42f | Kevin Wolf | * the image file failed.
|
114 | 45aba42f | Kevin Wolf | */
|
115 | 45aba42f | Kevin Wolf | |
116 | 55c17e98 | Kevin Wolf | static int l2_load(BlockDriverState *bs, uint64_t l2_offset, |
117 | 55c17e98 | Kevin Wolf | uint64_t **l2_table) |
118 | 45aba42f | Kevin Wolf | { |
119 | 45aba42f | Kevin Wolf | BDRVQcowState *s = bs->opaque; |
120 | 55c17e98 | Kevin Wolf | int ret;
|
121 | 45aba42f | Kevin Wolf | |
122 | 29c1a730 | Kevin Wolf | ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);
|
123 | 45aba42f | Kevin Wolf | |
124 | 29c1a730 | Kevin Wolf | return ret;
|
125 | 45aba42f | Kevin Wolf | } |
126 | 45aba42f | Kevin Wolf | |
127 | 45aba42f | Kevin Wolf | /*
|
128 | 6583e3c7 | Kevin Wolf | * Writes one sector of the L1 table to the disk (can't update single entries
|
129 | 6583e3c7 | Kevin Wolf | * and we really don't want bdrv_pread to perform a read-modify-write)
|
130 | 6583e3c7 | Kevin Wolf | */
|
131 | 6583e3c7 | Kevin Wolf | #define L1_ENTRIES_PER_SECTOR (512 / 8) |
132 | 66f82cee | Kevin Wolf | static int write_l1_entry(BlockDriverState *bs, int l1_index) |
133 | 6583e3c7 | Kevin Wolf | { |
134 | 66f82cee | Kevin Wolf | BDRVQcowState *s = bs->opaque; |
135 | 6583e3c7 | Kevin Wolf | uint64_t buf[L1_ENTRIES_PER_SECTOR]; |
136 | 6583e3c7 | Kevin Wolf | int l1_start_index;
|
137 | f7defcb6 | Kevin Wolf | int i, ret;
|
138 | 6583e3c7 | Kevin Wolf | |
139 | 6583e3c7 | Kevin Wolf | l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
|
140 | 6583e3c7 | Kevin Wolf | for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) { |
141 | 6583e3c7 | Kevin Wolf | buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]); |
142 | 6583e3c7 | Kevin Wolf | } |
143 | 6583e3c7 | Kevin Wolf | |
144 | 66f82cee | Kevin Wolf | BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); |
145 | 8b3b7206 | Kevin Wolf | ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
|
146 | f7defcb6 | Kevin Wolf | buf, sizeof(buf));
|
147 | f7defcb6 | Kevin Wolf | if (ret < 0) { |
148 | f7defcb6 | Kevin Wolf | return ret;
|
149 | 6583e3c7 | Kevin Wolf | } |
150 | 6583e3c7 | Kevin Wolf | |
151 | 6583e3c7 | Kevin Wolf | return 0; |
152 | 6583e3c7 | Kevin Wolf | } |
153 | 6583e3c7 | Kevin Wolf | |
154 | 6583e3c7 | Kevin Wolf | /*
|
155 | 45aba42f | Kevin Wolf | * l2_allocate
|
156 | 45aba42f | Kevin Wolf | *
|
157 | 45aba42f | Kevin Wolf | * Allocate a new l2 entry in the file. If l1_index points to an already
|
158 | 45aba42f | Kevin Wolf | * used entry in the L2 table (i.e. we are doing a copy on write for the L2
|
159 | 45aba42f | Kevin Wolf | * table) copy the contents of the old L2 table into the newly allocated one.
|
160 | 45aba42f | Kevin Wolf | * Otherwise the new table is initialized with zeros.
|
161 | 45aba42f | Kevin Wolf | *
|
162 | 45aba42f | Kevin Wolf | */
|
163 | 45aba42f | Kevin Wolf | |
164 | c46e1167 | Kevin Wolf | static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table) |
165 | 45aba42f | Kevin Wolf | { |
166 | 45aba42f | Kevin Wolf | BDRVQcowState *s = bs->opaque; |
167 | 6583e3c7 | Kevin Wolf | uint64_t old_l2_offset; |
168 | f4f0d391 | Kevin Wolf | uint64_t *l2_table; |
169 | f4f0d391 | Kevin Wolf | int64_t l2_offset; |
170 | c46e1167 | Kevin Wolf | int ret;
|
171 | 45aba42f | Kevin Wolf | |
172 | 45aba42f | Kevin Wolf | old_l2_offset = s->l1_table[l1_index]; |
173 | 45aba42f | Kevin Wolf | |
174 | 3cce16f4 | Kevin Wolf | trace_qcow2_l2_allocate(bs, l1_index); |
175 | 3cce16f4 | Kevin Wolf | |
176 | 45aba42f | Kevin Wolf | /* allocate a new l2 entry */
|
177 | 45aba42f | Kevin Wolf | |
178 | ed6ccf0f | Kevin Wolf | l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
|
179 | 5d757b56 | Kevin Wolf | if (l2_offset < 0) { |
180 | c46e1167 | Kevin Wolf | return l2_offset;
|
181 | 5d757b56 | Kevin Wolf | } |
182 | 29c1a730 | Kevin Wolf | |
183 | 29c1a730 | Kevin Wolf | ret = qcow2_cache_flush(bs, s->refcount_block_cache); |
184 | 29c1a730 | Kevin Wolf | if (ret < 0) { |
185 | 29c1a730 | Kevin Wolf | goto fail;
|
186 | 29c1a730 | Kevin Wolf | } |
187 | 45aba42f | Kevin Wolf | |
188 | 45aba42f | Kevin Wolf | /* allocate a new entry in the l2 cache */
|
189 | 45aba42f | Kevin Wolf | |
190 | 3cce16f4 | Kevin Wolf | trace_qcow2_l2_allocate_get_empty(bs, l1_index); |
191 | 29c1a730 | Kevin Wolf | ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
|
192 | 29c1a730 | Kevin Wolf | if (ret < 0) { |
193 | 29c1a730 | Kevin Wolf | return ret;
|
194 | 29c1a730 | Kevin Wolf | } |
195 | 29c1a730 | Kevin Wolf | |
196 | 29c1a730 | Kevin Wolf | l2_table = *table; |
197 | 45aba42f | Kevin Wolf | |
198 | 8e37f681 | Kevin Wolf | if ((old_l2_offset & L1E_OFFSET_MASK) == 0) { |
199 | 45aba42f | Kevin Wolf | /* if there was no old l2 table, clear the new table */
|
200 | 45aba42f | Kevin Wolf | memset(l2_table, 0, s->l2_size * sizeof(uint64_t)); |
201 | 45aba42f | Kevin Wolf | } else {
|
202 | 29c1a730 | Kevin Wolf | uint64_t* old_table; |
203 | 29c1a730 | Kevin Wolf | |
204 | 45aba42f | Kevin Wolf | /* if there was an old l2 table, read it from the disk */
|
205 | 66f82cee | Kevin Wolf | BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ); |
206 | 8e37f681 | Kevin Wolf | ret = qcow2_cache_get(bs, s->l2_table_cache, |
207 | 8e37f681 | Kevin Wolf | old_l2_offset & L1E_OFFSET_MASK, |
208 | 29c1a730 | Kevin Wolf | (void**) &old_table);
|
209 | 29c1a730 | Kevin Wolf | if (ret < 0) { |
210 | 29c1a730 | Kevin Wolf | goto fail;
|
211 | 29c1a730 | Kevin Wolf | } |
212 | 29c1a730 | Kevin Wolf | |
213 | 29c1a730 | Kevin Wolf | memcpy(l2_table, old_table, s->cluster_size); |
214 | 29c1a730 | Kevin Wolf | |
215 | 29c1a730 | Kevin Wolf | ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
|
216 | c46e1167 | Kevin Wolf | if (ret < 0) { |
217 | 175e1152 | Kevin Wolf | goto fail;
|
218 | c46e1167 | Kevin Wolf | } |
219 | 45aba42f | Kevin Wolf | } |
220 | 29c1a730 | Kevin Wolf | |
221 | 45aba42f | Kevin Wolf | /* write the l2 table to the file */
|
222 | 66f82cee | Kevin Wolf | BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE); |
223 | 29c1a730 | Kevin Wolf | |
224 | 3cce16f4 | Kevin Wolf | trace_qcow2_l2_allocate_write_l2(bs, l1_index); |
225 | 29c1a730 | Kevin Wolf | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table); |
226 | 29c1a730 | Kevin Wolf | ret = qcow2_cache_flush(bs, s->l2_table_cache); |
227 | c46e1167 | Kevin Wolf | if (ret < 0) { |
228 | 175e1152 | Kevin Wolf | goto fail;
|
229 | 175e1152 | Kevin Wolf | } |
230 | 175e1152 | Kevin Wolf | |
231 | 175e1152 | Kevin Wolf | /* update the L1 entry */
|
232 | 3cce16f4 | Kevin Wolf | trace_qcow2_l2_allocate_write_l1(bs, l1_index); |
233 | 175e1152 | Kevin Wolf | s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED; |
234 | 175e1152 | Kevin Wolf | ret = write_l1_entry(bs, l1_index); |
235 | 175e1152 | Kevin Wolf | if (ret < 0) { |
236 | 175e1152 | Kevin Wolf | goto fail;
|
237 | c46e1167 | Kevin Wolf | } |
238 | 45aba42f | Kevin Wolf | |
239 | c46e1167 | Kevin Wolf | *table = l2_table; |
240 | 3cce16f4 | Kevin Wolf | trace_qcow2_l2_allocate_done(bs, l1_index, 0);
|
241 | c46e1167 | Kevin Wolf | return 0; |
242 | 175e1152 | Kevin Wolf | |
243 | 175e1152 | Kevin Wolf | fail:
|
244 | 3cce16f4 | Kevin Wolf | trace_qcow2_l2_allocate_done(bs, l1_index, ret); |
245 | 29c1a730 | Kevin Wolf | qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
|
246 | 68dba0bf | Kevin Wolf | s->l1_table[l1_index] = old_l2_offset; |
247 | 175e1152 | Kevin Wolf | return ret;
|
248 | 45aba42f | Kevin Wolf | } |
249 | 45aba42f | Kevin Wolf | |
250 | 2bfcc4a0 | Kevin Wolf | /*
|
251 | 2bfcc4a0 | Kevin Wolf | * Checks how many clusters in a given L2 table are contiguous in the image
|
252 | 2bfcc4a0 | Kevin Wolf | * file. As soon as one of the flags in the bitmask stop_flags changes compared
|
253 | 2bfcc4a0 | Kevin Wolf | * to the first cluster, the search is stopped and the cluster is not counted
|
254 | 2bfcc4a0 | Kevin Wolf | * as contiguous. (This allows it, for example, to stop at the first compressed
|
255 | 2bfcc4a0 | Kevin Wolf | * cluster which may require a different handling)
|
256 | 2bfcc4a0 | Kevin Wolf | */
|
257 | 45aba42f | Kevin Wolf | static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size, |
258 | 2bfcc4a0 | Kevin Wolf | uint64_t *l2_table, uint64_t start, uint64_t stop_flags) |
259 | 45aba42f | Kevin Wolf | { |
260 | 45aba42f | Kevin Wolf | int i;
|
261 | 2bfcc4a0 | Kevin Wolf | uint64_t mask = stop_flags | L2E_OFFSET_MASK; |
262 | 2bfcc4a0 | Kevin Wolf | uint64_t offset = be64_to_cpu(l2_table[0]) & mask;
|
263 | 45aba42f | Kevin Wolf | |
264 | 45aba42f | Kevin Wolf | if (!offset)
|
265 | 45aba42f | Kevin Wolf | return 0; |
266 | 45aba42f | Kevin Wolf | |
267 | 2bfcc4a0 | Kevin Wolf | for (i = start; i < start + nb_clusters; i++) {
|
268 | 2bfcc4a0 | Kevin Wolf | uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask; |
269 | 2bfcc4a0 | Kevin Wolf | if (offset + (uint64_t) i * cluster_size != l2_entry) {
|
270 | 45aba42f | Kevin Wolf | break;
|
271 | 2bfcc4a0 | Kevin Wolf | } |
272 | 2bfcc4a0 | Kevin Wolf | } |
273 | 45aba42f | Kevin Wolf | |
274 | 45aba42f | Kevin Wolf | return (i - start);
|
275 | 45aba42f | Kevin Wolf | } |
276 | 45aba42f | Kevin Wolf | |
277 | 45aba42f | Kevin Wolf | static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table) |
278 | 45aba42f | Kevin Wolf | { |
279 | 2bfcc4a0 | Kevin Wolf | int i;
|
280 | 2bfcc4a0 | Kevin Wolf | |
281 | 2bfcc4a0 | Kevin Wolf | for (i = 0; i < nb_clusters; i++) { |
282 | 2bfcc4a0 | Kevin Wolf | int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));
|
283 | 45aba42f | Kevin Wolf | |
284 | 2bfcc4a0 | Kevin Wolf | if (type != QCOW2_CLUSTER_UNALLOCATED) {
|
285 | 2bfcc4a0 | Kevin Wolf | break;
|
286 | 2bfcc4a0 | Kevin Wolf | } |
287 | 2bfcc4a0 | Kevin Wolf | } |
288 | 45aba42f | Kevin Wolf | |
289 | 45aba42f | Kevin Wolf | return i;
|
290 | 45aba42f | Kevin Wolf | } |
291 | 45aba42f | Kevin Wolf | |
292 | 45aba42f | Kevin Wolf | /* The crypt function is compatible with the linux cryptoloop
|
293 | 45aba42f | Kevin Wolf | algorithm for < 4 GB images. NOTE: out_buf == in_buf is
|
294 | 45aba42f | Kevin Wolf | supported */
|
295 | ed6ccf0f | Kevin Wolf | void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
|
296 | ed6ccf0f | Kevin Wolf | uint8_t *out_buf, const uint8_t *in_buf,
|
297 | ed6ccf0f | Kevin Wolf | int nb_sectors, int enc, |
298 | ed6ccf0f | Kevin Wolf | const AES_KEY *key)
|
299 | 45aba42f | Kevin Wolf | { |
300 | 45aba42f | Kevin Wolf | union {
|
301 | 45aba42f | Kevin Wolf | uint64_t ll[2];
|
302 | 45aba42f | Kevin Wolf | uint8_t b[16];
|
303 | 45aba42f | Kevin Wolf | } ivec; |
304 | 45aba42f | Kevin Wolf | int i;
|
305 | 45aba42f | Kevin Wolf | |
306 | 45aba42f | Kevin Wolf | for(i = 0; i < nb_sectors; i++) { |
307 | 45aba42f | Kevin Wolf | ivec.ll[0] = cpu_to_le64(sector_num);
|
308 | 45aba42f | Kevin Wolf | ivec.ll[1] = 0; |
309 | 45aba42f | Kevin Wolf | AES_cbc_encrypt(in_buf, out_buf, 512, key,
|
310 | 45aba42f | Kevin Wolf | ivec.b, enc); |
311 | 45aba42f | Kevin Wolf | sector_num++; |
312 | 45aba42f | Kevin Wolf | in_buf += 512;
|
313 | 45aba42f | Kevin Wolf | out_buf += 512;
|
314 | 45aba42f | Kevin Wolf | } |
315 | 45aba42f | Kevin Wolf | } |
316 | 45aba42f | Kevin Wolf | |
/*
 * Copies sectors [n_start, n_end) of the cluster at guest offset
 * start_sect into the host cluster at cluster_offset, for copy-on-write.
 * Data is read through the guest view (so backing files and decryption
 * apply), re-encrypted if the image is encrypted, and written to the
 * image file. Returns 0 on success, -errno on failure.
 */
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    /* Nothing (or a negative amount after clamping) to copy */
    if (n <= 0) {
        return 0;
    }

    /* Bounce buffer with the alignment the backend requires */
    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        /* Encrypt in place (out_buf == in_buf is supported) before the
         * plaintext read above is written back to the image file. */
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    /* Write to the raw image file at the host cluster position */
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
374 | 45aba42f | Kevin Wolf | |
375 | 45aba42f | Kevin Wolf | |
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    /* Sector offset within the first cluster of the request */
    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    /* Bits of the guest offset covered by one L1 entry */
    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    /* Never report more than what one L1 entry can cover */
    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    /* Beyond the current L1 table means unallocated */
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    /* ret doubles as the cluster type returned to the caller */
    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        /* Count contiguous zero clusters; stop at compressed/non-zero */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        /* qcow2_get_cluster_type() returns only the values above */
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    /* Sectors usable from the caller's offset (excluding the lead-in) */
    *num = nb_available - index_in_cluster;

    return ret;
}
490 | 45aba42f | Kevin Wolf | |
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    /* Grow the L1 table if the offset lies beyond its current coverage */
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    /* COPIED means the L2 table is owned by this image (refcount 1) and
     * may be written in place; otherwise it must be (re)allocated. */
    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
554 | 45aba42f | Kevin Wolf | |
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 *
 * Note that 0 doubles as the error return here: callers cannot
 * distinguish "already allocated" from an I/O/allocation failure.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    /* Look up (and pin in the cache) the L2 table slice covering 'offset' */
    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        /* Drop the cache reference taken by get_cluster_table() */
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    /* Sub-cluster allocation: compressed data does not need a whole cluster */
    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    /* Number of 512-byte sectors the compressed data spans, counted from the
     * sector containing cluster_offset (the start may be unaligned). */
    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    /* Encode the sector count into the descriptor alongside the offset */
    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    /* Mark dirty before modifying the entry so the change is written back */
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}
617 | 4c1612d9 | Kevin Wolf | |
/*
 * Links the freshly allocated clusters described by 'm' into the L2 table,
 * performing COW of the head/tail partial clusters first.
 *
 * Called with s->lock held; the lock is dropped around copy_sectors()
 * (which performs guest-visible I/O) and re-taken afterwards.
 *
 * Returns 0 on success, -errno on failure.
 */
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;
    bool cow = false;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);

    if (m->nb_clusters == 0)
        return 0;

    /* Collects previous L2 entries that this update displaces; their
     * refcounts are dropped at the end (j counts how many were found). */
    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = m->offset >> 9;
    if (m->n_start) {
        /* COW the head: sectors before the write within the first cluster */
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        /* COW the tail: pad out the last cluster if the write ends mid-cluster */
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect, cluster_offset, m->nb_available,
                align_offset(m->nb_available, s->cluster_sectors));
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

    /*
     * Update L2 table.
     *
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (cow) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    if (qcow2_need_accurate_refcounts(s)) {
        /* Refcount blocks must reach disk before the L2 entries referring
         * to the new clusters do */
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster
         * each write allocates separate cluster and writes data concurrently.
         * The first one to complete updates l2 table with pointer to its
         * cluster the second one has to do RMW (which is done above by
         * copy_sectors()), update l2 table with its cluster pointer and free
         * old cluster. This is what this loop does */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        /* New entries are contiguous in the image file and immediately
         * writable in place, hence QCOW_OFLAG_COPIED */
        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
     }


    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            /* old_cluster[] entries are still big-endian; convert here */
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
710 | 45aba42f | Kevin Wolf | |
711 | 45aba42f | Kevin Wolf | /*
|
712 | bf319ece | Kevin Wolf | * Returns the number of contiguous clusters that can be used for an allocating
|
713 | bf319ece | Kevin Wolf | * write, but require COW to be performed (this includes yet unallocated space,
|
714 | bf319ece | Kevin Wolf | * which must copy from the backing file)
|
715 | bf319ece | Kevin Wolf | */
|
716 | bf319ece | Kevin Wolf | static int count_cow_clusters(BDRVQcowState *s, int nb_clusters, |
717 | bf319ece | Kevin Wolf | uint64_t *l2_table, int l2_index)
|
718 | bf319ece | Kevin Wolf | { |
719 | 143550a8 | Kevin Wolf | int i;
|
720 | bf319ece | Kevin Wolf | |
721 | 143550a8 | Kevin Wolf | for (i = 0; i < nb_clusters; i++) { |
722 | 143550a8 | Kevin Wolf | uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]); |
723 | 143550a8 | Kevin Wolf | int cluster_type = qcow2_get_cluster_type(l2_entry);
|
724 | 143550a8 | Kevin Wolf | |
725 | 143550a8 | Kevin Wolf | switch(cluster_type) {
|
726 | 143550a8 | Kevin Wolf | case QCOW2_CLUSTER_NORMAL:
|
727 | 143550a8 | Kevin Wolf | if (l2_entry & QCOW_OFLAG_COPIED) {
|
728 | 143550a8 | Kevin Wolf | goto out;
|
729 | 143550a8 | Kevin Wolf | } |
730 | bf319ece | Kevin Wolf | break;
|
731 | 143550a8 | Kevin Wolf | case QCOW2_CLUSTER_UNALLOCATED:
|
732 | 143550a8 | Kevin Wolf | case QCOW2_CLUSTER_COMPRESSED:
|
733 | 6377af48 | Kevin Wolf | case QCOW2_CLUSTER_ZERO:
|
734 | bf319ece | Kevin Wolf | break;
|
735 | 143550a8 | Kevin Wolf | default:
|
736 | 143550a8 | Kevin Wolf | abort(); |
737 | 143550a8 | Kevin Wolf | } |
738 | bf319ece | Kevin Wolf | } |
739 | bf319ece | Kevin Wolf | |
740 | 143550a8 | Kevin Wolf | out:
|
741 | bf319ece | Kevin Wolf | assert(i <= nb_clusters); |
742 | bf319ece | Kevin Wolf | return i;
|
743 | bf319ece | Kevin Wolf | } |
744 | bf319ece | Kevin Wolf | |
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        /* Compare in units of clusters */
        uint64_t start = guest_offset >> s->cluster_bits;
        uint64_t end = start + *nb_clusters;
        uint64_t old_start = old_alloc->offset >> s->cluster_bits;
        uint64_t old_end = old_start + old_alloc->nb_clusters;

        if (end < old_start || start > old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                *nb_clusters = old_start - start;
            } else {
                *nb_clusters = 0;
            }

            if (*nb_clusters == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Callers must not request a zero-length allocation */
    if (!*nb_clusters) {
        abort();
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        /* No position constraint: let the allocator pick any free space */
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        /* Must extend an existing run: allocate exactly at *host_offset;
         * the allocator may grant fewer clusters than requested */
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
829 | 250196f1 | Kevin Wolf | |
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret, sectors;
    uint64_t *l2_table;
    unsigned int nb_clusters, keep_clusters;
    uint64_t cluster_offset;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    /* Find L2 entry for the first involved cluster */
again:
    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
                      s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /*
     * Check how many clusters are already allocated and don't need COW, and how
     * many need a new allocation.
     */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], 0,
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);
        nb_clusters -= keep_clusters;
    } else {
        /* First cluster is not directly writable: nothing to keep, and
         * cluster_offset must not leak a stale/compressed descriptor */
        keep_clusters = 0;
        cluster_offset = 0;
    }

    if (nb_clusters > 0) {
        /* For the moment, overwrite compressed clusters one by one */
        uint64_t entry = be64_to_cpu(l2_table[l2_index + keep_clusters]);
        if (entry & QCOW_OFLAG_COMPRESSED) {
            nb_clusters = 1;
        } else {
            nb_clusters = count_cow_clusters(s, nb_clusters, l2_table,
                                             l2_index + keep_clusters);
        }
    }

    /* Strip flag bits; keep only the host offset of the first kept cluster */
    cluster_offset &= L2E_OFFSET_MASK;

    /*
     * The L2 table isn't used any more after this. As long as the cache works
     * synchronously, it's important to release it before calling
     * do_alloc_cluster_offset, which may yield if we need to wait for another
     * request to complete. If we still had the reference, we could use up the
     * whole cache with sleeping requests.
     */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* If there is something left to allocate, do that now */
    *m = (QCowL2Meta) {
        .cluster_offset = cluster_offset,
        .nb_clusters    = 0,
    };
    qemu_co_queue_init(&m->dependent_requests);

    if (nb_clusters > 0) {
        uint64_t alloc_offset;
        uint64_t alloc_cluster_offset;
        uint64_t keep_bytes = keep_clusters * s->cluster_size;

        /* Calculate start and size of allocation */
        alloc_offset = offset + keep_bytes;

        if (keep_clusters == 0) {
            /* 0 tells do_alloc_cluster_offset() to allocate anywhere */
            alloc_cluster_offset = 0;
        } else {
            /* New clusters must continue the kept run in the image file */
            alloc_cluster_offset = cluster_offset + keep_bytes;
        }

        /* Allocate, if necessary at a given offset in the image file */
        ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret == -EAGAIN) {
            /* Waited for a conflicting in-flight allocation; the L2 state
             * may have changed, so redo the lookup from scratch */
            goto again;
        } else if (ret < 0) {
            goto fail;
        }

        /* save info needed for meta data update */
        if (nb_clusters > 0) {
            /*
             * requested_sectors: Number of sectors from the start of the first
             * newly allocated cluster to the end of the (possibly shortened
             * before) write request.
             *
             * avail_sectors: Number of sectors from the start of the first
             * newly allocated to the end of the last newly allocated cluster.
             */
            int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
            int avail_sectors = nb_clusters
                                << (s->cluster_bits - BDRV_SECTOR_BITS);

            *m = (QCowL2Meta) {
                .cluster_offset = keep_clusters == 0 ?
                                  alloc_cluster_offset : cluster_offset,
                .alloc_offset   = alloc_cluster_offset,
                .offset         = alloc_offset & ~(s->cluster_size - 1),
                .n_start        = keep_clusters == 0 ? n_start : 0,
                .nb_clusters    = nb_clusters,
                .nb_available   = MIN(requested_sectors, avail_sectors),
            };
            qemu_co_queue_init(&m->dependent_requests);
            /* Publish this allocation so overlapping writers can wait on it */
            QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);
        }
    }

    /* Some cleanup work */
    sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
    if (sectors > n_end) {
        sectors = n_end;
    }

    assert(sectors > n_start);
    *num = sectors - n_start;

    return 0;

fail:
    if (m->nb_clusters > 0) {
        QLIST_REMOVE(m, next_in_flight);
    }
    return ret;
}
996 | 45aba42f | Kevin Wolf | |
/*
 * Inflate a raw-deflate stream (no zlib header, window bits -12) from
 * buf[0..buf_size) into out_buf. Succeeds only if the stream decodes to
 * exactly out_buf_size bytes.
 *
 * Returns 0 on success, -1 on any zlib error or size mismatch.
 */
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream zs;
    int status;
    int produced;

    memset(&zs, 0, sizeof(zs));
    zs.next_in = (uint8_t *)buf;
    zs.avail_in = buf_size;
    zs.next_out = out_buf;
    zs.avail_out = out_buf_size;

    if (inflateInit2(&zs, -12) != Z_OK) {
        return -1;
    }

    status = inflate(&zs, Z_FINISH);
    produced = zs.next_out - out_buf;
    inflateEnd(&zs);

    /* Z_BUF_ERROR is tolerated as long as the full output was produced */
    if (status != Z_STREAM_END && status != Z_BUF_ERROR) {
        return -1;
    }
    if (produced != out_buf_size) {
        return -1;
    }
    return 0;
}
1023 | 45aba42f | Kevin Wolf | |
/*
 * Reads and decompresses the compressed cluster described by the L2 entry
 * 'cluster_offset' into the one-entry decompression cache
 * (s->cluster_cache), unless that cluster is already cached.
 *
 * Returns 0 on success, -errno if reading the compressed data fails,
 * -EIO if decompression fails.
 */
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    /* Host file offset of the compressed data (flag/size bits stripped) */
    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        /* Sector count is encoded in the upper bits of the L2 entry */
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        /* The data may start mid-sector; read whole sectors and skip the head */
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        /* Remember which cluster the cache now holds */
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
1048 | 5ea929e3 | Kevin Wolf | |
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 *
 * 'offset' must be cluster-aligned (callers round it up). Returns the
 * (possibly clamped) cluster count on success, -errno on failure.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            /* Already unallocated: nothing to discard */
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Clusters handled in this iteration, even ones that were already free */
    return nb_clusters;
}
1094 | 5ea929e3 | Kevin Wolf | |
1095 | 5ea929e3 | Kevin Wolf | int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
|
1096 | 5ea929e3 | Kevin Wolf | int nb_sectors)
|
1097 | 5ea929e3 | Kevin Wolf | { |
1098 | 5ea929e3 | Kevin Wolf | BDRVQcowState *s = bs->opaque; |
1099 | 5ea929e3 | Kevin Wolf | uint64_t end_offset; |
1100 | 5ea929e3 | Kevin Wolf | unsigned int nb_clusters; |
1101 | 5ea929e3 | Kevin Wolf | int ret;
|
1102 | 5ea929e3 | Kevin Wolf | |
1103 | 5ea929e3 | Kevin Wolf | end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS); |
1104 | 5ea929e3 | Kevin Wolf | |
1105 | 5ea929e3 | Kevin Wolf | /* Round start up and end down */
|
1106 | 5ea929e3 | Kevin Wolf | offset = align_offset(offset, s->cluster_size); |
1107 | 5ea929e3 | Kevin Wolf | end_offset &= ~(s->cluster_size - 1);
|
1108 | 5ea929e3 | Kevin Wolf | |
1109 | 5ea929e3 | Kevin Wolf | if (offset > end_offset) {
|
1110 | 5ea929e3 | Kevin Wolf | return 0; |
1111 | 5ea929e3 | Kevin Wolf | } |
1112 | 5ea929e3 | Kevin Wolf | |
1113 | 5ea929e3 | Kevin Wolf | nb_clusters = size_to_clusters(s, end_offset - offset); |
1114 | 5ea929e3 | Kevin Wolf | |
1115 | 5ea929e3 | Kevin Wolf | /* Each L2 table is handled by its own loop iteration */
|
1116 | 5ea929e3 | Kevin Wolf | while (nb_clusters > 0) { |
1117 | 5ea929e3 | Kevin Wolf | ret = discard_single_l2(bs, offset, nb_clusters); |
1118 | 5ea929e3 | Kevin Wolf | if (ret < 0) { |
1119 | 5ea929e3 | Kevin Wolf | return ret;
|
1120 | 5ea929e3 | Kevin Wolf | } |
1121 | 5ea929e3 | Kevin Wolf | |
1122 | 5ea929e3 | Kevin Wolf | nb_clusters -= ret; |
1123 | 5ea929e3 | Kevin Wolf | offset += (ret * s->cluster_size); |
1124 | 5ea929e3 | Kevin Wolf | } |
1125 | 5ea929e3 | Kevin Wolf | |
1126 | 5ea929e3 | Kevin Wolf | return 0; |
1127 | 5ea929e3 | Kevin Wolf | } |
1128 | 621f0589 | Kevin Wolf | |
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 *
 * For compressed clusters the L2 entry is replaced entirely by a plain
 * zero-flag entry and the compressed data is freed; for other clusters
 * the zero flag is simply OR-ed into the existing entry so the allocated
 * cluster (if any) is kept.
 *
 * Returns the number of processed clusters (> 0), or -errno on failure.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    /* Load (and pin) the L2 table covering offset into the cache */
    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        /* L2 entries are stored big-endian on disk */
        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        /* Mark the table dirty before modifying it so the cache writes
         * it back */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            /* Compressed data cannot keep the zero flag alongside it:
             * replace the entry outright and drop the old allocation */
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1);
        } else {
            /* Keep any existing allocation; just flag it as zero */
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    /* Unpin the cached L2 table */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}
1173 | 621f0589 | Kevin Wolf | |
1174 | 621f0589 | Kevin Wolf | int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors) |
1175 | 621f0589 | Kevin Wolf | { |
1176 | 621f0589 | Kevin Wolf | BDRVQcowState *s = bs->opaque; |
1177 | 621f0589 | Kevin Wolf | unsigned int nb_clusters; |
1178 | 621f0589 | Kevin Wolf | int ret;
|
1179 | 621f0589 | Kevin Wolf | |
1180 | 621f0589 | Kevin Wolf | /* The zero flag is only supported by version 3 and newer */
|
1181 | 621f0589 | Kevin Wolf | if (s->qcow_version < 3) { |
1182 | 621f0589 | Kevin Wolf | return -ENOTSUP;
|
1183 | 621f0589 | Kevin Wolf | } |
1184 | 621f0589 | Kevin Wolf | |
1185 | 621f0589 | Kevin Wolf | /* Each L2 table is handled by its own loop iteration */
|
1186 | 621f0589 | Kevin Wolf | nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS); |
1187 | 621f0589 | Kevin Wolf | |
1188 | 621f0589 | Kevin Wolf | while (nb_clusters > 0) { |
1189 | 621f0589 | Kevin Wolf | ret = zero_single_l2(bs, offset, nb_clusters); |
1190 | 621f0589 | Kevin Wolf | if (ret < 0) { |
1191 | 621f0589 | Kevin Wolf | return ret;
|
1192 | 621f0589 | Kevin Wolf | } |
1193 | 621f0589 | Kevin Wolf | |
1194 | 621f0589 | Kevin Wolf | nb_clusters -= ret; |
1195 | 621f0589 | Kevin Wolf | offset += (ret * s->cluster_size); |
1196 | 621f0589 | Kevin Wolf | } |
1197 | 621f0589 | Kevin Wolf | |
1198 | 621f0589 | Kevin Wolf | return 0; |
1199 | 621f0589 | Kevin Wolf | } |