Revision 33656af7 — diff of b/block-migration.c (adds per-device AIO in-flight bitmap tracking to block migration)
49 | 49 |
int64_t total_sectors; |
50 | 50 |
int64_t dirty; |
51 | 51 |
QSIMPLEQ_ENTRY(BlkMigDevState) entry; |
52 |
unsigned long *aio_bitmap; |
|
52 | 53 |
} BlkMigDevState; |
53 | 54 |
|
54 | 55 |
typedef struct BlkMigBlock { |
55 | 56 |
uint8_t *buf; |
56 | 57 |
BlkMigDevState *bmds; |
57 | 58 |
int64_t sector; |
59 |
int nr_sectors; |
|
58 | 60 |
struct iovec iov; |
59 | 61 |
QEMUIOVector qiov; |
60 | 62 |
BlockDriverAIOCB *aiocb; |
... | ... | |
140 | 142 |
return (block_mig_state.reads * BLOCK_SIZE)/ block_mig_state.total_time; |
141 | 143 |
} |
142 | 144 |
|
145 |
/* Return non-zero when the dirty-tracking chunk containing @sector has an
 * AIO request marked in flight in bmds->aio_bitmap, zero otherwise.
 * Also returns zero when no bitmap was allocated or @sector lies past the
 * end of the device. */
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
    unsigned long bits_per_long = sizeof(unsigned long) * 8;

    /* Guard clauses: no bitmap, or sector beyond the device length. */
    if (!bmds->aio_bitmap ||
        (sector << BDRV_SECTOR_BITS) >= bdrv_getlength(bmds->bs)) {
        return 0;
    }

    return (bmds->aio_bitmap[chunk / bits_per_long] >>
            (chunk % bits_per_long)) & 1;
}
|
157 |
|
|
158 |
/* Set (@set != 0) or clear (@set == 0) the in-flight bit of every
 * dirty-tracking chunk touched by the sector range
 * [@sector_num, @sector_num + @nb_sectors).
 * Assumes bmds->aio_bitmap was allocated and covers the range. */
static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    const unsigned long bits_per_long = sizeof(unsigned long) * 8;
    int64_t first = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    int64_t last = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
    int64_t chunk;

    for (chunk = first; chunk <= last; chunk++) {
        unsigned long idx = chunk / bits_per_long;
        unsigned long mask = 1UL << (chunk % bits_per_long);

        /* |= and &= ~ are no-ops when the bit already has the target
         * value, so no separate "already set/clear" test is needed. */
        if (set) {
            bmds->aio_bitmap[idx] |= mask;
        } else {
            bmds->aio_bitmap[idx] &= ~mask;
        }
    }
}
|
183 |
|
|
184 |
/* Allocate and zero bmds->aio_bitmap: one bit per dirty-tracking chunk of
 * the device backing @bmds.
 *
 * The bitmap is indexed as an array of unsigned long by
 * bmds_aio_inflight()/bmds_set_aio_inflight(), so the allocation must be a
 * whole number of longs.  The previous code allocated only
 * ceil(chunks / 8) bytes, which made the last word access read/write up to
 * sizeof(unsigned long) - 1 bytes past the end of the buffer whenever the
 * byte count was not long-aligned — a heap buffer overflow. */
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    /* Number of chunks, packed one bit each, rounded up to whole bytes. */
    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    /* Round up to a multiple of sizeof(unsigned long) so word-wise
     * accesses stay in bounds. */
    bitmap_size = (bitmap_size + (int64_t)sizeof(unsigned long) - 1) &
            ~((int64_t)sizeof(unsigned long) - 1);

    bmds->aio_bitmap = qemu_mallocz(bitmap_size);
}
|
195 |
|
|
143 | 196 |
static void blk_mig_read_cb(void *opaque, int ret) |
144 | 197 |
{ |
145 | 198 |
BlkMigBlock *blk = opaque; |
... | ... | |
151 | 204 |
add_avg_read_time(blk->time); |
152 | 205 |
|
153 | 206 |
QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry); |
207 |
bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0); |
|
154 | 208 |
|
155 | 209 |
block_mig_state.submitted--; |
156 | 210 |
block_mig_state.read_done++; |
... | ... | |
194 | 248 |
blk->buf = qemu_malloc(BLOCK_SIZE); |
195 | 249 |
blk->bmds = bmds; |
196 | 250 |
blk->sector = cur_sector; |
251 |
blk->nr_sectors = nr_sectors; |
|
197 | 252 |
|
198 | 253 |
blk->iov.iov_base = blk->buf; |
199 | 254 |
blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE; |
... | ... | |
248 | 303 |
bmds->total_sectors = sectors; |
249 | 304 |
bmds->completed_sectors = 0; |
250 | 305 |
bmds->shared_base = block_mig_state.shared_base; |
306 |
alloc_aio_bitmap(bmds); |
|
251 | 307 |
|
252 | 308 |
block_mig_state.total_sector_sum += sectors; |
253 | 309 |
|
... | ... | |
329 | 385 |
int nr_sectors; |
330 | 386 |
|
331 | 387 |
for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) { |
388 |
if (bmds_aio_inflight(bmds, sector)) |
|
389 |
qemu_aio_flush(); |
|
332 | 390 |
if (bdrv_get_dirty(bmds->bs, sector)) { |
333 | 391 |
|
334 | 392 |
if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) { |
... | ... | |
340 | 398 |
blk->buf = qemu_malloc(BLOCK_SIZE); |
341 | 399 |
blk->bmds = bmds; |
342 | 400 |
blk->sector = sector; |
401 |
blk->nr_sectors = nr_sectors; |
|
343 | 402 |
|
344 | 403 |
if (is_async) { |
345 | 404 |
blk->iov.iov_base = blk->buf; |
... | ... | |
354 | 413 |
goto error; |
355 | 414 |
} |
356 | 415 |
block_mig_state.submitted++; |
416 |
bmds_set_aio_inflight(bmds, sector, nr_sectors, 1); |
|
357 | 417 |
} else { |
358 | 418 |
if (bdrv_read(bmds->bs, sector, blk->buf, |
359 | 419 |
nr_sectors) < 0) { |
... | ... | |
474 | 534 |
|
475 | 535 |
while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) { |
476 | 536 |
QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry); |
537 |
qemu_free(bmds->aio_bitmap); |
|
477 | 538 |
qemu_free(bmds); |
478 | 539 |
} |
479 | 540 |
|
Also available in: Unified diff