/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
13 | 893f7eba | Paolo Bonzini | |
14 | 893f7eba | Paolo Bonzini | #include "trace.h" |
15 | 737e150e | Paolo Bonzini | #include "block/blockjob.h" |
16 | 737e150e | Paolo Bonzini | #include "block/block_int.h" |
17 | 893f7eba | Paolo Bonzini | #include "qemu/ratelimit.h" |
18 | b812f671 | Paolo Bonzini | #include "qemu/bitmap.h" |
19 | 893f7eba | Paolo Bonzini | |
20 | 402a4741 | Paolo Bonzini | #define SLICE_TIME 100000000ULL /* ns */ |
21 | 402a4741 | Paolo Bonzini | #define MAX_IN_FLIGHT 16 |
22 | 402a4741 | Paolo Bonzini | |
23 | 402a4741 | Paolo Bonzini | /* The mirroring buffer is a list of granularity-sized chunks.
|
24 | 402a4741 | Paolo Bonzini | * Free chunks are organized in a list.
|
25 | 402a4741 | Paolo Bonzini | */
|
26 | 402a4741 | Paolo Bonzini | typedef struct MirrorBuffer { |
27 | 402a4741 | Paolo Bonzini | QSIMPLEQ_ENTRY(MirrorBuffer) next; |
28 | 402a4741 | Paolo Bonzini | } MirrorBuffer; |
29 | 893f7eba | Paolo Bonzini | |
30 | 893f7eba | Paolo Bonzini | typedef struct MirrorBlockJob { |
31 | 893f7eba | Paolo Bonzini | BlockJob common; |
32 | 893f7eba | Paolo Bonzini | RateLimit limit; |
33 | 893f7eba | Paolo Bonzini | BlockDriverState *target; |
34 | 893f7eba | Paolo Bonzini | MirrorSyncMode mode; |
35 | b952b558 | Paolo Bonzini | BlockdevOnError on_source_error, on_target_error; |
36 | d63ffd87 | Paolo Bonzini | bool synced;
|
37 | d63ffd87 | Paolo Bonzini | bool should_complete;
|
38 | 893f7eba | Paolo Bonzini | int64_t sector_num; |
39 | eee13dfe | Paolo Bonzini | int64_t granularity; |
40 | b812f671 | Paolo Bonzini | size_t buf_size; |
41 | b812f671 | Paolo Bonzini | unsigned long *cow_bitmap; |
42 | 8f0720ec | Paolo Bonzini | HBitmapIter hbi; |
43 | 893f7eba | Paolo Bonzini | uint8_t *buf; |
44 | 402a4741 | Paolo Bonzini | QSIMPLEQ_HEAD(, MirrorBuffer) buf_free; |
45 | 402a4741 | Paolo Bonzini | int buf_free_count;
|
46 | bd48bde8 | Paolo Bonzini | |
47 | 402a4741 | Paolo Bonzini | unsigned long *in_flight_bitmap; |
48 | bd48bde8 | Paolo Bonzini | int in_flight;
|
49 | bd48bde8 | Paolo Bonzini | int ret;
|
50 | 893f7eba | Paolo Bonzini | } MirrorBlockJob; |
51 | 893f7eba | Paolo Bonzini | |
52 | bd48bde8 | Paolo Bonzini | typedef struct MirrorOp { |
53 | bd48bde8 | Paolo Bonzini | MirrorBlockJob *s; |
54 | bd48bde8 | Paolo Bonzini | QEMUIOVector qiov; |
55 | bd48bde8 | Paolo Bonzini | int64_t sector_num; |
56 | bd48bde8 | Paolo Bonzini | int nb_sectors;
|
57 | bd48bde8 | Paolo Bonzini | } MirrorOp; |
58 | bd48bde8 | Paolo Bonzini | |
59 | b952b558 | Paolo Bonzini | static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read, |
60 | b952b558 | Paolo Bonzini | int error)
|
61 | b952b558 | Paolo Bonzini | { |
62 | b952b558 | Paolo Bonzini | s->synced = false;
|
63 | b952b558 | Paolo Bonzini | if (read) {
|
64 | b952b558 | Paolo Bonzini | return block_job_error_action(&s->common, s->common.bs,
|
65 | b952b558 | Paolo Bonzini | s->on_source_error, true, error);
|
66 | b952b558 | Paolo Bonzini | } else {
|
67 | b952b558 | Paolo Bonzini | return block_job_error_action(&s->common, s->target,
|
68 | b952b558 | Paolo Bonzini | s->on_target_error, false, error);
|
69 | b952b558 | Paolo Bonzini | } |
70 | b952b558 | Paolo Bonzini | } |
71 | b952b558 | Paolo Bonzini | |
72 | bd48bde8 | Paolo Bonzini | static void mirror_iteration_done(MirrorOp *op, int ret) |
73 | bd48bde8 | Paolo Bonzini | { |
74 | bd48bde8 | Paolo Bonzini | MirrorBlockJob *s = op->s; |
75 | 402a4741 | Paolo Bonzini | struct iovec *iov;
|
76 | bd48bde8 | Paolo Bonzini | int64_t chunk_num; |
77 | 402a4741 | Paolo Bonzini | int i, nb_chunks, sectors_per_chunk;
|
78 | bd48bde8 | Paolo Bonzini | |
79 | bd48bde8 | Paolo Bonzini | trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret); |
80 | bd48bde8 | Paolo Bonzini | |
81 | bd48bde8 | Paolo Bonzini | s->in_flight--; |
82 | 402a4741 | Paolo Bonzini | iov = op->qiov.iov; |
83 | 402a4741 | Paolo Bonzini | for (i = 0; i < op->qiov.niov; i++) { |
84 | 402a4741 | Paolo Bonzini | MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base; |
85 | 402a4741 | Paolo Bonzini | QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next); |
86 | 402a4741 | Paolo Bonzini | s->buf_free_count++; |
87 | 402a4741 | Paolo Bonzini | } |
88 | 402a4741 | Paolo Bonzini | |
89 | bd48bde8 | Paolo Bonzini | sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; |
90 | bd48bde8 | Paolo Bonzini | chunk_num = op->sector_num / sectors_per_chunk; |
91 | bd48bde8 | Paolo Bonzini | nb_chunks = op->nb_sectors / sectors_per_chunk; |
92 | 402a4741 | Paolo Bonzini | bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks); |
93 | bd48bde8 | Paolo Bonzini | if (s->cow_bitmap && ret >= 0) { |
94 | bd48bde8 | Paolo Bonzini | bitmap_set(s->cow_bitmap, chunk_num, nb_chunks); |
95 | bd48bde8 | Paolo Bonzini | } |
96 | bd48bde8 | Paolo Bonzini | |
97 | bd48bde8 | Paolo Bonzini | g_slice_free(MirrorOp, op); |
98 | bd48bde8 | Paolo Bonzini | qemu_coroutine_enter(s->common.co, NULL);
|
99 | bd48bde8 | Paolo Bonzini | } |
100 | bd48bde8 | Paolo Bonzini | |
101 | bd48bde8 | Paolo Bonzini | static void mirror_write_complete(void *opaque, int ret) |
102 | bd48bde8 | Paolo Bonzini | { |
103 | bd48bde8 | Paolo Bonzini | MirrorOp *op = opaque; |
104 | bd48bde8 | Paolo Bonzini | MirrorBlockJob *s = op->s; |
105 | bd48bde8 | Paolo Bonzini | if (ret < 0) { |
106 | bd48bde8 | Paolo Bonzini | BlockDriverState *source = s->common.bs; |
107 | bd48bde8 | Paolo Bonzini | BlockErrorAction action; |
108 | bd48bde8 | Paolo Bonzini | |
109 | bd48bde8 | Paolo Bonzini | bdrv_set_dirty(source, op->sector_num, op->nb_sectors); |
110 | bd48bde8 | Paolo Bonzini | action = mirror_error_action(s, false, -ret);
|
111 | bd48bde8 | Paolo Bonzini | if (action == BDRV_ACTION_REPORT && s->ret >= 0) { |
112 | bd48bde8 | Paolo Bonzini | s->ret = ret; |
113 | bd48bde8 | Paolo Bonzini | } |
114 | bd48bde8 | Paolo Bonzini | } |
115 | bd48bde8 | Paolo Bonzini | mirror_iteration_done(op, ret); |
116 | bd48bde8 | Paolo Bonzini | } |
117 | bd48bde8 | Paolo Bonzini | |
118 | bd48bde8 | Paolo Bonzini | static void mirror_read_complete(void *opaque, int ret) |
119 | bd48bde8 | Paolo Bonzini | { |
120 | bd48bde8 | Paolo Bonzini | MirrorOp *op = opaque; |
121 | bd48bde8 | Paolo Bonzini | MirrorBlockJob *s = op->s; |
122 | bd48bde8 | Paolo Bonzini | if (ret < 0) { |
123 | bd48bde8 | Paolo Bonzini | BlockDriverState *source = s->common.bs; |
124 | bd48bde8 | Paolo Bonzini | BlockErrorAction action; |
125 | bd48bde8 | Paolo Bonzini | |
126 | bd48bde8 | Paolo Bonzini | bdrv_set_dirty(source, op->sector_num, op->nb_sectors); |
127 | bd48bde8 | Paolo Bonzini | action = mirror_error_action(s, true, -ret);
|
128 | bd48bde8 | Paolo Bonzini | if (action == BDRV_ACTION_REPORT && s->ret >= 0) { |
129 | bd48bde8 | Paolo Bonzini | s->ret = ret; |
130 | bd48bde8 | Paolo Bonzini | } |
131 | bd48bde8 | Paolo Bonzini | |
132 | bd48bde8 | Paolo Bonzini | mirror_iteration_done(op, ret); |
133 | bd48bde8 | Paolo Bonzini | return;
|
134 | bd48bde8 | Paolo Bonzini | } |
135 | bd48bde8 | Paolo Bonzini | bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors, |
136 | bd48bde8 | Paolo Bonzini | mirror_write_complete, op); |
137 | bd48bde8 | Paolo Bonzini | } |
138 | bd48bde8 | Paolo Bonzini | |
139 | bd48bde8 | Paolo Bonzini | static void coroutine_fn mirror_iteration(MirrorBlockJob *s) |
140 | 893f7eba | Paolo Bonzini | { |
141 | 893f7eba | Paolo Bonzini | BlockDriverState *source = s->common.bs; |
142 | 402a4741 | Paolo Bonzini | int nb_sectors, sectors_per_chunk, nb_chunks;
|
143 | 884fea4e | Paolo Bonzini | int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector; |
144 | bd48bde8 | Paolo Bonzini | MirrorOp *op; |
145 | 893f7eba | Paolo Bonzini | |
146 | 8f0720ec | Paolo Bonzini | s->sector_num = hbitmap_iter_next(&s->hbi); |
147 | 8f0720ec | Paolo Bonzini | if (s->sector_num < 0) { |
148 | 8f0720ec | Paolo Bonzini | bdrv_dirty_iter_init(source, &s->hbi); |
149 | 8f0720ec | Paolo Bonzini | s->sector_num = hbitmap_iter_next(&s->hbi); |
150 | 8f0720ec | Paolo Bonzini | trace_mirror_restart_iter(s, bdrv_get_dirty_count(source)); |
151 | 8f0720ec | Paolo Bonzini | assert(s->sector_num >= 0);
|
152 | 8f0720ec | Paolo Bonzini | } |
153 | 8f0720ec | Paolo Bonzini | |
154 | 402a4741 | Paolo Bonzini | hbitmap_next_sector = s->sector_num; |
155 | 884fea4e | Paolo Bonzini | sector_num = s->sector_num; |
156 | 884fea4e | Paolo Bonzini | sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; |
157 | 884fea4e | Paolo Bonzini | end = s->common.len >> BDRV_SECTOR_BITS; |
158 | 402a4741 | Paolo Bonzini | |
159 | 884fea4e | Paolo Bonzini | /* Extend the QEMUIOVector to include all adjacent blocks that will
|
160 | 884fea4e | Paolo Bonzini | * be copied in this operation.
|
161 | b812f671 | Paolo Bonzini | *
|
162 | 884fea4e | Paolo Bonzini | * We have to do this if we have no backing file yet in the destination,
|
163 | 884fea4e | Paolo Bonzini | * and the cluster size is very large. Then we need to do COW ourselves.
|
164 | 884fea4e | Paolo Bonzini | * The first time a cluster is copied, copy it entirely. Note that,
|
165 | 884fea4e | Paolo Bonzini | * because both the granularity and the cluster size are powers of two,
|
166 | 884fea4e | Paolo Bonzini | * the number of sectors to copy cannot exceed one cluster.
|
167 | 884fea4e | Paolo Bonzini | *
|
168 | 884fea4e | Paolo Bonzini | * We also want to extend the QEMUIOVector to include more adjacent
|
169 | 884fea4e | Paolo Bonzini | * dirty blocks if possible, to limit the number of I/O operations and
|
170 | 884fea4e | Paolo Bonzini | * run efficiently even with a small granularity.
|
171 | b812f671 | Paolo Bonzini | */
|
172 | 884fea4e | Paolo Bonzini | nb_chunks = 0;
|
173 | 884fea4e | Paolo Bonzini | nb_sectors = 0;
|
174 | 884fea4e | Paolo Bonzini | next_sector = sector_num; |
175 | 884fea4e | Paolo Bonzini | next_chunk = sector_num / sectors_per_chunk; |
176 | 402a4741 | Paolo Bonzini | |
177 | 402a4741 | Paolo Bonzini | /* Wait for I/O to this cluster (from a previous iteration) to be done. */
|
178 | 884fea4e | Paolo Bonzini | while (test_bit(next_chunk, s->in_flight_bitmap)) {
|
179 | 402a4741 | Paolo Bonzini | trace_mirror_yield_in_flight(s, sector_num, s->in_flight); |
180 | 402a4741 | Paolo Bonzini | qemu_coroutine_yield(); |
181 | b812f671 | Paolo Bonzini | } |
182 | b812f671 | Paolo Bonzini | |
183 | 884fea4e | Paolo Bonzini | do {
|
184 | 884fea4e | Paolo Bonzini | int added_sectors, added_chunks;
|
185 | 884fea4e | Paolo Bonzini | |
186 | 884fea4e | Paolo Bonzini | if (!bdrv_get_dirty(source, next_sector) ||
|
187 | 884fea4e | Paolo Bonzini | test_bit(next_chunk, s->in_flight_bitmap)) { |
188 | 884fea4e | Paolo Bonzini | assert(nb_sectors > 0);
|
189 | 884fea4e | Paolo Bonzini | break;
|
190 | 884fea4e | Paolo Bonzini | } |
191 | 884fea4e | Paolo Bonzini | |
192 | 884fea4e | Paolo Bonzini | added_sectors = sectors_per_chunk; |
193 | 884fea4e | Paolo Bonzini | if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
|
194 | 884fea4e | Paolo Bonzini | bdrv_round_to_clusters(s->target, |
195 | 884fea4e | Paolo Bonzini | next_sector, added_sectors, |
196 | 884fea4e | Paolo Bonzini | &next_sector, &added_sectors); |
197 | 884fea4e | Paolo Bonzini | |
198 | 884fea4e | Paolo Bonzini | /* On the first iteration, the rounding may make us copy
|
199 | 884fea4e | Paolo Bonzini | * sectors before the first dirty one.
|
200 | 884fea4e | Paolo Bonzini | */
|
201 | 884fea4e | Paolo Bonzini | if (next_sector < sector_num) {
|
202 | 884fea4e | Paolo Bonzini | assert(nb_sectors == 0);
|
203 | 884fea4e | Paolo Bonzini | sector_num = next_sector; |
204 | 884fea4e | Paolo Bonzini | next_chunk = next_sector / sectors_per_chunk; |
205 | 884fea4e | Paolo Bonzini | } |
206 | 884fea4e | Paolo Bonzini | } |
207 | 884fea4e | Paolo Bonzini | |
208 | 884fea4e | Paolo Bonzini | added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors)); |
209 | 884fea4e | Paolo Bonzini | added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;
|
210 | 884fea4e | Paolo Bonzini | |
211 | 884fea4e | Paolo Bonzini | /* When doing COW, it may happen that there is not enough space for
|
212 | 884fea4e | Paolo Bonzini | * a full cluster. Wait if that is the case.
|
213 | 884fea4e | Paolo Bonzini | */
|
214 | 884fea4e | Paolo Bonzini | while (nb_chunks == 0 && s->buf_free_count < added_chunks) { |
215 | 884fea4e | Paolo Bonzini | trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight); |
216 | 884fea4e | Paolo Bonzini | qemu_coroutine_yield(); |
217 | 884fea4e | Paolo Bonzini | } |
218 | 884fea4e | Paolo Bonzini | if (s->buf_free_count < nb_chunks + added_chunks) {
|
219 | 884fea4e | Paolo Bonzini | trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight); |
220 | 884fea4e | Paolo Bonzini | break;
|
221 | 884fea4e | Paolo Bonzini | } |
222 | 884fea4e | Paolo Bonzini | |
223 | 884fea4e | Paolo Bonzini | /* We have enough free space to copy these sectors. */
|
224 | 884fea4e | Paolo Bonzini | bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks); |
225 | 402a4741 | Paolo Bonzini | |
226 | 884fea4e | Paolo Bonzini | nb_sectors += added_sectors; |
227 | 884fea4e | Paolo Bonzini | nb_chunks += added_chunks; |
228 | 884fea4e | Paolo Bonzini | next_sector += added_sectors; |
229 | 884fea4e | Paolo Bonzini | next_chunk += added_chunks; |
230 | 884fea4e | Paolo Bonzini | } while (next_sector < end);
|
231 | bd48bde8 | Paolo Bonzini | |
232 | bd48bde8 | Paolo Bonzini | /* Allocate a MirrorOp that is used as an AIO callback. */
|
233 | bd48bde8 | Paolo Bonzini | op = g_slice_new(MirrorOp); |
234 | bd48bde8 | Paolo Bonzini | op->s = s; |
235 | bd48bde8 | Paolo Bonzini | op->sector_num = sector_num; |
236 | bd48bde8 | Paolo Bonzini | op->nb_sectors = nb_sectors; |
237 | 402a4741 | Paolo Bonzini | |
238 | 402a4741 | Paolo Bonzini | /* Now make a QEMUIOVector taking enough granularity-sized chunks
|
239 | 402a4741 | Paolo Bonzini | * from s->buf_free.
|
240 | 402a4741 | Paolo Bonzini | */
|
241 | 402a4741 | Paolo Bonzini | qemu_iovec_init(&op->qiov, nb_chunks); |
242 | 402a4741 | Paolo Bonzini | next_sector = sector_num; |
243 | 402a4741 | Paolo Bonzini | while (nb_chunks-- > 0) { |
244 | 402a4741 | Paolo Bonzini | MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free); |
245 | 402a4741 | Paolo Bonzini | QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next); |
246 | 402a4741 | Paolo Bonzini | s->buf_free_count--; |
247 | 402a4741 | Paolo Bonzini | qemu_iovec_add(&op->qiov, buf, s->granularity); |
248 | 402a4741 | Paolo Bonzini | |
249 | 402a4741 | Paolo Bonzini | /* Advance the HBitmapIter in parallel, so that we do not examine
|
250 | 402a4741 | Paolo Bonzini | * the same sector twice.
|
251 | 402a4741 | Paolo Bonzini | */
|
252 | 402a4741 | Paolo Bonzini | if (next_sector > hbitmap_next_sector && bdrv_get_dirty(source, next_sector)) {
|
253 | 402a4741 | Paolo Bonzini | hbitmap_next_sector = hbitmap_iter_next(&s->hbi); |
254 | 402a4741 | Paolo Bonzini | } |
255 | 402a4741 | Paolo Bonzini | |
256 | 402a4741 | Paolo Bonzini | next_sector += sectors_per_chunk; |
257 | 402a4741 | Paolo Bonzini | } |
258 | bd48bde8 | Paolo Bonzini | |
259 | b812f671 | Paolo Bonzini | bdrv_reset_dirty(source, sector_num, nb_sectors); |
260 | 893f7eba | Paolo Bonzini | |
261 | 893f7eba | Paolo Bonzini | /* Copy the dirty cluster. */
|
262 | bd48bde8 | Paolo Bonzini | s->in_flight++; |
263 | b812f671 | Paolo Bonzini | trace_mirror_one_iteration(s, sector_num, nb_sectors); |
264 | bd48bde8 | Paolo Bonzini | bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors, |
265 | bd48bde8 | Paolo Bonzini | mirror_read_complete, op); |
266 | bd48bde8 | Paolo Bonzini | } |
267 | b952b558 | Paolo Bonzini | |
268 | 402a4741 | Paolo Bonzini | static void mirror_free_init(MirrorBlockJob *s) |
269 | 402a4741 | Paolo Bonzini | { |
270 | 402a4741 | Paolo Bonzini | int granularity = s->granularity;
|
271 | 402a4741 | Paolo Bonzini | size_t buf_size = s->buf_size; |
272 | 402a4741 | Paolo Bonzini | uint8_t *buf = s->buf; |
273 | 402a4741 | Paolo Bonzini | |
274 | 402a4741 | Paolo Bonzini | assert(s->buf_free_count == 0);
|
275 | 402a4741 | Paolo Bonzini | QSIMPLEQ_INIT(&s->buf_free); |
276 | 402a4741 | Paolo Bonzini | while (buf_size != 0) { |
277 | 402a4741 | Paolo Bonzini | MirrorBuffer *cur = (MirrorBuffer *)buf; |
278 | 402a4741 | Paolo Bonzini | QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next); |
279 | 402a4741 | Paolo Bonzini | s->buf_free_count++; |
280 | 402a4741 | Paolo Bonzini | buf_size -= granularity; |
281 | 402a4741 | Paolo Bonzini | buf += granularity; |
282 | 402a4741 | Paolo Bonzini | } |
283 | 402a4741 | Paolo Bonzini | } |
284 | 402a4741 | Paolo Bonzini | |
285 | bd48bde8 | Paolo Bonzini | static void mirror_drain(MirrorBlockJob *s) |
286 | bd48bde8 | Paolo Bonzini | { |
287 | bd48bde8 | Paolo Bonzini | while (s->in_flight > 0) { |
288 | bd48bde8 | Paolo Bonzini | qemu_coroutine_yield(); |
289 | bd48bde8 | Paolo Bonzini | } |
290 | 893f7eba | Paolo Bonzini | } |
291 | 893f7eba | Paolo Bonzini | |
292 | 893f7eba | Paolo Bonzini | static void coroutine_fn mirror_run(void *opaque) |
293 | 893f7eba | Paolo Bonzini | { |
294 | 893f7eba | Paolo Bonzini | MirrorBlockJob *s = opaque; |
295 | 893f7eba | Paolo Bonzini | BlockDriverState *bs = s->common.bs; |
296 | eee13dfe | Paolo Bonzini | int64_t sector_num, end, sectors_per_chunk, length; |
297 | bd48bde8 | Paolo Bonzini | uint64_t last_pause_ns; |
298 | b812f671 | Paolo Bonzini | BlockDriverInfo bdi; |
299 | b812f671 | Paolo Bonzini | char backing_filename[1024]; |
300 | 893f7eba | Paolo Bonzini | int ret = 0; |
301 | 893f7eba | Paolo Bonzini | int n;
|
302 | 893f7eba | Paolo Bonzini | |
303 | 893f7eba | Paolo Bonzini | if (block_job_is_cancelled(&s->common)) {
|
304 | 893f7eba | Paolo Bonzini | goto immediate_exit;
|
305 | 893f7eba | Paolo Bonzini | } |
306 | 893f7eba | Paolo Bonzini | |
307 | 893f7eba | Paolo Bonzini | s->common.len = bdrv_getlength(bs); |
308 | 88ff0e48 | Paolo Bonzini | if (s->common.len <= 0) { |
309 | 893f7eba | Paolo Bonzini | block_job_completed(&s->common, s->common.len); |
310 | 893f7eba | Paolo Bonzini | return;
|
311 | 893f7eba | Paolo Bonzini | } |
312 | 893f7eba | Paolo Bonzini | |
313 | 402a4741 | Paolo Bonzini | length = (bdrv_getlength(bs) + s->granularity - 1) / s->granularity;
|
314 | 402a4741 | Paolo Bonzini | s->in_flight_bitmap = bitmap_new(length); |
315 | 402a4741 | Paolo Bonzini | |
316 | b812f671 | Paolo Bonzini | /* If we have no backing file yet in the destination, we cannot let
|
317 | b812f671 | Paolo Bonzini | * the destination do COW. Instead, we copy sectors around the
|
318 | b812f671 | Paolo Bonzini | * dirty data if needed. We need a bitmap to do that.
|
319 | b812f671 | Paolo Bonzini | */
|
320 | b812f671 | Paolo Bonzini | bdrv_get_backing_filename(s->target, backing_filename, |
321 | b812f671 | Paolo Bonzini | sizeof(backing_filename));
|
322 | b812f671 | Paolo Bonzini | if (backing_filename[0] && !s->target->backing_hd) { |
323 | b812f671 | Paolo Bonzini | bdrv_get_info(s->target, &bdi); |
324 | eee13dfe | Paolo Bonzini | if (s->granularity < bdi.cluster_size) {
|
325 | 08e4ed6c | Paolo Bonzini | s->buf_size = MAX(s->buf_size, bdi.cluster_size); |
326 | b812f671 | Paolo Bonzini | s->cow_bitmap = bitmap_new(length); |
327 | b812f671 | Paolo Bonzini | } |
328 | b812f671 | Paolo Bonzini | } |
329 | b812f671 | Paolo Bonzini | |
330 | 893f7eba | Paolo Bonzini | end = s->common.len >> BDRV_SECTOR_BITS; |
331 | b812f671 | Paolo Bonzini | s->buf = qemu_blockalign(bs, s->buf_size); |
332 | eee13dfe | Paolo Bonzini | sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; |
333 | 402a4741 | Paolo Bonzini | mirror_free_init(s); |
334 | 893f7eba | Paolo Bonzini | |
335 | 893f7eba | Paolo Bonzini | if (s->mode != MIRROR_SYNC_MODE_NONE) {
|
336 | 893f7eba | Paolo Bonzini | /* First part, loop on the sectors and initialize the dirty bitmap. */
|
337 | 893f7eba | Paolo Bonzini | BlockDriverState *base; |
338 | 893f7eba | Paolo Bonzini | base = s->mode == MIRROR_SYNC_MODE_FULL ? NULL : bs->backing_hd;
|
339 | 893f7eba | Paolo Bonzini | for (sector_num = 0; sector_num < end; ) { |
340 | eee13dfe | Paolo Bonzini | int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1; |
341 | 4f578637 | Paolo Bonzini | ret = bdrv_is_allocated_above(bs, base, |
342 | 4f578637 | Paolo Bonzini | sector_num, next - sector_num, &n); |
343 | 893f7eba | Paolo Bonzini | |
344 | 893f7eba | Paolo Bonzini | if (ret < 0) { |
345 | 893f7eba | Paolo Bonzini | goto immediate_exit;
|
346 | 893f7eba | Paolo Bonzini | } |
347 | 893f7eba | Paolo Bonzini | |
348 | 893f7eba | Paolo Bonzini | assert(n > 0);
|
349 | 893f7eba | Paolo Bonzini | if (ret == 1) { |
350 | 893f7eba | Paolo Bonzini | bdrv_set_dirty(bs, sector_num, n); |
351 | 893f7eba | Paolo Bonzini | sector_num = next; |
352 | 893f7eba | Paolo Bonzini | } else {
|
353 | 893f7eba | Paolo Bonzini | sector_num += n; |
354 | 893f7eba | Paolo Bonzini | } |
355 | 893f7eba | Paolo Bonzini | } |
356 | 893f7eba | Paolo Bonzini | } |
357 | 893f7eba | Paolo Bonzini | |
358 | 8f0720ec | Paolo Bonzini | bdrv_dirty_iter_init(bs, &s->hbi); |
359 | bc72ad67 | Alex Bligh | last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
360 | 893f7eba | Paolo Bonzini | for (;;) {
|
361 | 893f7eba | Paolo Bonzini | uint64_t delay_ns; |
362 | 893f7eba | Paolo Bonzini | int64_t cnt; |
363 | 893f7eba | Paolo Bonzini | bool should_complete;
|
364 | 893f7eba | Paolo Bonzini | |
365 | bd48bde8 | Paolo Bonzini | if (s->ret < 0) { |
366 | bd48bde8 | Paolo Bonzini | ret = s->ret; |
367 | bd48bde8 | Paolo Bonzini | goto immediate_exit;
|
368 | bd48bde8 | Paolo Bonzini | } |
369 | bd48bde8 | Paolo Bonzini | |
370 | 893f7eba | Paolo Bonzini | cnt = bdrv_get_dirty_count(bs); |
371 | bd48bde8 | Paolo Bonzini | |
372 | bd48bde8 | Paolo Bonzini | /* Note that even when no rate limit is applied we need to yield
|
373 | bd48bde8 | Paolo Bonzini | * periodically with no pending I/O so that qemu_aio_flush() returns.
|
374 | bd48bde8 | Paolo Bonzini | * We do so every SLICE_TIME nanoseconds, or when there is an error,
|
375 | bd48bde8 | Paolo Bonzini | * or when the source is clean, whichever comes first.
|
376 | bd48bde8 | Paolo Bonzini | */
|
377 | bc72ad67 | Alex Bligh | if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
|
378 | bd48bde8 | Paolo Bonzini | s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
379 | 402a4741 | Paolo Bonzini | if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 || |
380 | 402a4741 | Paolo Bonzini | (cnt == 0 && s->in_flight > 0)) { |
381 | 402a4741 | Paolo Bonzini | trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt); |
382 | bd48bde8 | Paolo Bonzini | qemu_coroutine_yield(); |
383 | bd48bde8 | Paolo Bonzini | continue;
|
384 | bd48bde8 | Paolo Bonzini | } else if (cnt != 0) { |
385 | bd48bde8 | Paolo Bonzini | mirror_iteration(s); |
386 | bd48bde8 | Paolo Bonzini | continue;
|
387 | 893f7eba | Paolo Bonzini | } |
388 | 893f7eba | Paolo Bonzini | } |
389 | 893f7eba | Paolo Bonzini | |
390 | 893f7eba | Paolo Bonzini | should_complete = false;
|
391 | bd48bde8 | Paolo Bonzini | if (s->in_flight == 0 && cnt == 0) { |
392 | 893f7eba | Paolo Bonzini | trace_mirror_before_flush(s); |
393 | 893f7eba | Paolo Bonzini | ret = bdrv_flush(s->target); |
394 | 893f7eba | Paolo Bonzini | if (ret < 0) { |
395 | b952b558 | Paolo Bonzini | if (mirror_error_action(s, false, -ret) == BDRV_ACTION_REPORT) { |
396 | b952b558 | Paolo Bonzini | goto immediate_exit;
|
397 | b952b558 | Paolo Bonzini | } |
398 | b952b558 | Paolo Bonzini | } else {
|
399 | b952b558 | Paolo Bonzini | /* We're out of the streaming phase. From now on, if the job
|
400 | b952b558 | Paolo Bonzini | * is cancelled we will actually complete all pending I/O and
|
401 | b952b558 | Paolo Bonzini | * report completion. This way, block-job-cancel will leave
|
402 | b952b558 | Paolo Bonzini | * the target in a consistent state.
|
403 | b952b558 | Paolo Bonzini | */
|
404 | b952b558 | Paolo Bonzini | s->common.offset = end * BDRV_SECTOR_SIZE; |
405 | b952b558 | Paolo Bonzini | if (!s->synced) {
|
406 | b952b558 | Paolo Bonzini | block_job_ready(&s->common); |
407 | b952b558 | Paolo Bonzini | s->synced = true;
|
408 | b952b558 | Paolo Bonzini | } |
409 | b952b558 | Paolo Bonzini | |
410 | b952b558 | Paolo Bonzini | should_complete = s->should_complete || |
411 | b952b558 | Paolo Bonzini | block_job_is_cancelled(&s->common); |
412 | b952b558 | Paolo Bonzini | cnt = bdrv_get_dirty_count(bs); |
413 | d63ffd87 | Paolo Bonzini | } |
414 | 893f7eba | Paolo Bonzini | } |
415 | 893f7eba | Paolo Bonzini | |
416 | 893f7eba | Paolo Bonzini | if (cnt == 0 && should_complete) { |
417 | 893f7eba | Paolo Bonzini | /* The dirty bitmap is not updated while operations are pending.
|
418 | 893f7eba | Paolo Bonzini | * If we're about to exit, wait for pending operations before
|
419 | 893f7eba | Paolo Bonzini | * calling bdrv_get_dirty_count(bs), or we may exit while the
|
420 | 893f7eba | Paolo Bonzini | * source has dirty data to copy!
|
421 | 893f7eba | Paolo Bonzini | *
|
422 | 893f7eba | Paolo Bonzini | * Note that I/O can be submitted by the guest while
|
423 | 893f7eba | Paolo Bonzini | * mirror_populate runs.
|
424 | 893f7eba | Paolo Bonzini | */
|
425 | 893f7eba | Paolo Bonzini | trace_mirror_before_drain(s, cnt); |
426 | 893f7eba | Paolo Bonzini | bdrv_drain_all(); |
427 | 893f7eba | Paolo Bonzini | cnt = bdrv_get_dirty_count(bs); |
428 | 893f7eba | Paolo Bonzini | } |
429 | 893f7eba | Paolo Bonzini | |
430 | 893f7eba | Paolo Bonzini | ret = 0;
|
431 | d63ffd87 | Paolo Bonzini | trace_mirror_before_sleep(s, cnt, s->synced); |
432 | d63ffd87 | Paolo Bonzini | if (!s->synced) {
|
433 | 893f7eba | Paolo Bonzini | /* Publish progress */
|
434 | acc906c6 | Paolo Bonzini | s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE; |
435 | 893f7eba | Paolo Bonzini | |
436 | 893f7eba | Paolo Bonzini | if (s->common.speed) {
|
437 | eee13dfe | Paolo Bonzini | delay_ns = ratelimit_calculate_delay(&s->limit, sectors_per_chunk); |
438 | 893f7eba | Paolo Bonzini | } else {
|
439 | 893f7eba | Paolo Bonzini | delay_ns = 0;
|
440 | 893f7eba | Paolo Bonzini | } |
441 | 893f7eba | Paolo Bonzini | |
442 | 7483d1e5 | Alex Bligh | block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns); |
443 | 893f7eba | Paolo Bonzini | if (block_job_is_cancelled(&s->common)) {
|
444 | 893f7eba | Paolo Bonzini | break;
|
445 | 893f7eba | Paolo Bonzini | } |
446 | 893f7eba | Paolo Bonzini | } else if (!should_complete) { |
447 | bd48bde8 | Paolo Bonzini | delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0); |
448 | 7483d1e5 | Alex Bligh | block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns); |
449 | 893f7eba | Paolo Bonzini | } else if (cnt == 0) { |
450 | 893f7eba | Paolo Bonzini | /* The two disks are in sync. Exit and report successful
|
451 | 893f7eba | Paolo Bonzini | * completion.
|
452 | 893f7eba | Paolo Bonzini | */
|
453 | 893f7eba | Paolo Bonzini | assert(QLIST_EMPTY(&bs->tracked_requests)); |
454 | 893f7eba | Paolo Bonzini | s->common.cancelled = false;
|
455 | 893f7eba | Paolo Bonzini | break;
|
456 | 893f7eba | Paolo Bonzini | } |
457 | bc72ad67 | Alex Bligh | last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
458 | 893f7eba | Paolo Bonzini | } |
459 | 893f7eba | Paolo Bonzini | |
460 | 893f7eba | Paolo Bonzini | immediate_exit:
|
461 | bd48bde8 | Paolo Bonzini | if (s->in_flight > 0) { |
462 | bd48bde8 | Paolo Bonzini | /* We get here only if something went wrong. Either the job failed,
|
463 | bd48bde8 | Paolo Bonzini | * or it was cancelled prematurely so that we do not guarantee that
|
464 | bd48bde8 | Paolo Bonzini | * the target is a copy of the source.
|
465 | bd48bde8 | Paolo Bonzini | */
|
466 | bd48bde8 | Paolo Bonzini | assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
|
467 | bd48bde8 | Paolo Bonzini | mirror_drain(s); |
468 | bd48bde8 | Paolo Bonzini | } |
469 | bd48bde8 | Paolo Bonzini | |
470 | bd48bde8 | Paolo Bonzini | assert(s->in_flight == 0);
|
471 | 7191bf31 | Markus Armbruster | qemu_vfree(s->buf); |
472 | b812f671 | Paolo Bonzini | g_free(s->cow_bitmap); |
473 | 402a4741 | Paolo Bonzini | g_free(s->in_flight_bitmap); |
474 | 50717e94 | Paolo Bonzini | bdrv_set_dirty_tracking(bs, 0);
|
475 | b952b558 | Paolo Bonzini | bdrv_iostatus_disable(s->target); |
476 | d63ffd87 | Paolo Bonzini | if (s->should_complete && ret == 0) { |
477 | d63ffd87 | Paolo Bonzini | if (bdrv_get_flags(s->target) != bdrv_get_flags(s->common.bs)) {
|
478 | d63ffd87 | Paolo Bonzini | bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
|
479 | d63ffd87 | Paolo Bonzini | } |
480 | d63ffd87 | Paolo Bonzini | bdrv_swap(s->target, s->common.bs); |
481 | d63ffd87 | Paolo Bonzini | } |
482 | 893f7eba | Paolo Bonzini | bdrv_close(s->target); |
483 | 4f6fd349 | Fam Zheng | bdrv_unref(s->target); |
484 | 893f7eba | Paolo Bonzini | block_job_completed(&s->common, ret); |
485 | 893f7eba | Paolo Bonzini | } |
486 | 893f7eba | Paolo Bonzini | |
487 | 893f7eba | Paolo Bonzini | static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp) |
488 | 893f7eba | Paolo Bonzini | { |
489 | 893f7eba | Paolo Bonzini | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); |
490 | 893f7eba | Paolo Bonzini | |
491 | 893f7eba | Paolo Bonzini | if (speed < 0) { |
492 | 893f7eba | Paolo Bonzini | error_set(errp, QERR_INVALID_PARAMETER, "speed");
|
493 | 893f7eba | Paolo Bonzini | return;
|
494 | 893f7eba | Paolo Bonzini | } |
495 | 893f7eba | Paolo Bonzini | ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME); |
496 | 893f7eba | Paolo Bonzini | } |
497 | 893f7eba | Paolo Bonzini | |
498 | b952b558 | Paolo Bonzini | static void mirror_iostatus_reset(BlockJob *job) |
499 | b952b558 | Paolo Bonzini | { |
500 | b952b558 | Paolo Bonzini | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); |
501 | b952b558 | Paolo Bonzini | |
502 | b952b558 | Paolo Bonzini | bdrv_iostatus_reset(s->target); |
503 | b952b558 | Paolo Bonzini | } |
504 | b952b558 | Paolo Bonzini | |
505 | d63ffd87 | Paolo Bonzini | static void mirror_complete(BlockJob *job, Error **errp) |
506 | d63ffd87 | Paolo Bonzini | { |
507 | d63ffd87 | Paolo Bonzini | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); |
508 | 34b5d2c6 | Max Reitz | Error *local_err = NULL;
|
509 | d63ffd87 | Paolo Bonzini | int ret;
|
510 | d63ffd87 | Paolo Bonzini | |
511 | 34b5d2c6 | Max Reitz | ret = bdrv_open_backing_file(s->target, NULL, &local_err);
|
512 | d63ffd87 | Paolo Bonzini | if (ret < 0) { |
513 | d63ffd87 | Paolo Bonzini | char backing_filename[PATH_MAX];
|
514 | d63ffd87 | Paolo Bonzini | bdrv_get_full_backing_filename(s->target, backing_filename, |
515 | d63ffd87 | Paolo Bonzini | sizeof(backing_filename));
|
516 | 34b5d2c6 | Max Reitz | error_propagate(errp, local_err); |
517 | d63ffd87 | Paolo Bonzini | return;
|
518 | d63ffd87 | Paolo Bonzini | } |
519 | d63ffd87 | Paolo Bonzini | if (!s->synced) {
|
520 | d63ffd87 | Paolo Bonzini | error_set(errp, QERR_BLOCK_JOB_NOT_READY, job->bs->device_name); |
521 | d63ffd87 | Paolo Bonzini | return;
|
522 | d63ffd87 | Paolo Bonzini | } |
523 | d63ffd87 | Paolo Bonzini | |
524 | d63ffd87 | Paolo Bonzini | s->should_complete = true;
|
525 | d63ffd87 | Paolo Bonzini | block_job_resume(job); |
526 | d63ffd87 | Paolo Bonzini | } |
527 | d63ffd87 | Paolo Bonzini | |
528 | f59fee8d | Kevin Wolf | static const BlockJobType mirror_job_type = { |
529 | 893f7eba | Paolo Bonzini | .instance_size = sizeof(MirrorBlockJob),
|
530 | 893f7eba | Paolo Bonzini | .job_type = "mirror",
|
531 | 893f7eba | Paolo Bonzini | .set_speed = mirror_set_speed, |
532 | b952b558 | Paolo Bonzini | .iostatus_reset= mirror_iostatus_reset, |
533 | d63ffd87 | Paolo Bonzini | .complete = mirror_complete, |
534 | 893f7eba | Paolo Bonzini | }; |
535 | 893f7eba | Paolo Bonzini | |
536 | 893f7eba | Paolo Bonzini | void mirror_start(BlockDriverState *bs, BlockDriverState *target,
|
537 | 08e4ed6c | Paolo Bonzini | int64_t speed, int64_t granularity, int64_t buf_size, |
538 | 08e4ed6c | Paolo Bonzini | MirrorSyncMode mode, BlockdevOnError on_source_error, |
539 | b952b558 | Paolo Bonzini | BlockdevOnError on_target_error, |
540 | 893f7eba | Paolo Bonzini | BlockDriverCompletionFunc *cb, |
541 | 893f7eba | Paolo Bonzini | void *opaque, Error **errp)
|
542 | 893f7eba | Paolo Bonzini | { |
543 | 893f7eba | Paolo Bonzini | MirrorBlockJob *s; |
544 | 893f7eba | Paolo Bonzini | |
545 | eee13dfe | Paolo Bonzini | if (granularity == 0) { |
546 | eee13dfe | Paolo Bonzini | /* Choose the default granularity based on the target file's cluster
|
547 | eee13dfe | Paolo Bonzini | * size, clamped between 4k and 64k. */
|
548 | eee13dfe | Paolo Bonzini | BlockDriverInfo bdi; |
549 | eee13dfe | Paolo Bonzini | if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) { |
550 | eee13dfe | Paolo Bonzini | granularity = MAX(4096, bdi.cluster_size);
|
551 | eee13dfe | Paolo Bonzini | granularity = MIN(65536, granularity);
|
552 | eee13dfe | Paolo Bonzini | } else {
|
553 | eee13dfe | Paolo Bonzini | granularity = 65536;
|
554 | eee13dfe | Paolo Bonzini | } |
555 | eee13dfe | Paolo Bonzini | } |
556 | eee13dfe | Paolo Bonzini | |
557 | eee13dfe | Paolo Bonzini | assert ((granularity & (granularity - 1)) == 0); |
558 | eee13dfe | Paolo Bonzini | |
559 | b952b558 | Paolo Bonzini | if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
|
560 | b952b558 | Paolo Bonzini | on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) && |
561 | b952b558 | Paolo Bonzini | !bdrv_iostatus_is_enabled(bs)) { |
562 | b952b558 | Paolo Bonzini | error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
|
563 | b952b558 | Paolo Bonzini | return;
|
564 | b952b558 | Paolo Bonzini | } |
565 | b952b558 | Paolo Bonzini | |
566 | 893f7eba | Paolo Bonzini | s = block_job_create(&mirror_job_type, bs, speed, cb, opaque, errp); |
567 | 893f7eba | Paolo Bonzini | if (!s) {
|
568 | 893f7eba | Paolo Bonzini | return;
|
569 | 893f7eba | Paolo Bonzini | } |
570 | 893f7eba | Paolo Bonzini | |
571 | b952b558 | Paolo Bonzini | s->on_source_error = on_source_error; |
572 | b952b558 | Paolo Bonzini | s->on_target_error = on_target_error; |
573 | 893f7eba | Paolo Bonzini | s->target = target; |
574 | 893f7eba | Paolo Bonzini | s->mode = mode; |
575 | eee13dfe | Paolo Bonzini | s->granularity = granularity; |
576 | 08e4ed6c | Paolo Bonzini | s->buf_size = MAX(buf_size, granularity); |
577 | b812f671 | Paolo Bonzini | |
578 | eee13dfe | Paolo Bonzini | bdrv_set_dirty_tracking(bs, granularity); |
579 | 893f7eba | Paolo Bonzini | bdrv_set_enable_write_cache(s->target, true);
|
580 | b952b558 | Paolo Bonzini | bdrv_set_on_error(s->target, on_target_error, on_target_error); |
581 | b952b558 | Paolo Bonzini | bdrv_iostatus_enable(s->target); |
582 | 893f7eba | Paolo Bonzini | s->common.co = qemu_coroutine_create(mirror_run); |
583 | 893f7eba | Paolo Bonzini | trace_mirror_start(bs, s, s->common.co, opaque); |
584 | 893f7eba | Paolo Bonzini | qemu_coroutine_enter(s->common.co, s); |
585 | 893f7eba | Paolo Bonzini | } |