#define SECTOR_SIZE (1 << SECTOR_BITS)

/* Mask selecting the sector-aligned part of a byte address.
 * NOTE: no trailing semicolon here -- with one, the macro expands an
 * extra empty statement (and breaks inside expressions such as
 * `addr & ~SECTOR_MASK`). */
#define SECTOR_MASK ~(SECTOR_SIZE - 1)

/* Migration transfer unit in bytes; depends on the runtime chunk size. */
#define BLOCK_SIZE (block_mig_state->sectors_per_block << SECTOR_BITS)

/* Flags stored in the low (sub-sector) bits of the on-the-wire address. */
#define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
#define BLK_MIG_FLAG_EOS          0x02
... | ... | |
//#define DEBUG_BLK_MIGRATION

/* Debug tracing: compiled to a no-op unless DEBUG_BLK_MIGRATION is set. */
#ifdef DEBUG_BLK_MIGRATION
#define dprintf(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
|
44 |
typedef struct BlkMigDevState {
|
|
45 |
BlockDriverState *bs;
|
|
46 |
int bulk_completed;
|
|
47 |
int shared_base;
|
|
48 |
struct BlkMigDevState *next;
|
|
49 |
int64_t cur_sector;
|
|
50 |
int64_t total_sectors;
|
|
51 |
int64_t dirty;
|
|
52 |
} BlkMigDevState;
|
|
53 |
|
44 |
54 |
typedef struct BlkMigBlock {
|
45 |
55 |
uint8_t *buf;
|
46 |
56 |
BlkMigDevState *bmds;
|
... | ... | |
68 |
78 |
int64_t print_completion;
|
69 |
79 |
} BlkMigState;
|
70 |
80 |
|
71 |
|
static BlkMigState *block_mig_state = NULL;
|
|
81 |
static BlkMigState *block_mig_state = NULL;
|
72 |
82 |
|
73 |
83 |
static void blk_mig_read_cb(void *opaque, int ret)
|
74 |
84 |
{
|
75 |
85 |
BlkMigBlock *blk = opaque;
|
76 |
|
|
|
86 |
|
77 |
87 |
blk->ret = ret;
|
78 |
|
|
|
88 |
|
79 |
89 |
/* insert at the end */
|
80 |
|
if(block_mig_state->last_blk == NULL) {
|
|
90 |
if (block_mig_state->last_blk == NULL) {
|
81 |
91 |
block_mig_state->first_blk = blk;
|
82 |
92 |
block_mig_state->last_blk = blk;
|
83 |
93 |
} else {
|
84 |
94 |
block_mig_state->last_blk->next = blk;
|
85 |
95 |
block_mig_state->last_blk = blk;
|
86 |
96 |
}
|
87 |
|
|
|
97 |
|
88 |
98 |
block_mig_state->submitted--;
|
89 |
99 |
block_mig_state->read_done++;
|
90 |
100 |
assert(block_mig_state->submitted >= 0);
|
91 |
|
|
92 |
|
return;
|
93 |
101 |
}
|
94 |
102 |
|
95 |
103 |
static int mig_read_device_bulk(QEMUFile *f, BlkMigDevState *bms)
|
96 |
|
{
|
|
104 |
{
|
97 |
105 |
int nr_sectors;
|
98 |
106 |
int64_t total_sectors, cur_sector = 0;
|
99 |
107 |
BlockDriverState *bs = bms->bs;
|
100 |
108 |
BlkMigBlock *blk;
|
101 |
|
|
|
109 |
|
102 |
110 |
blk = qemu_malloc(sizeof(BlkMigBlock));
|
103 |
111 |
blk->buf = qemu_malloc(BLOCK_SIZE);
|
104 |
|
|
|
112 |
|
105 |
113 |
cur_sector = bms->cur_sector;
|
106 |
114 |
total_sectors = bdrv_getlength(bs) >> SECTOR_BITS;
|
107 |
|
|
108 |
|
if(bms->shared_base) {
|
109 |
|
while(cur_sector < bms->total_sectors &&
|
110 |
|
!bdrv_is_allocated(bms->bs, cur_sector,
|
111 |
|
MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
|
|
115 |
|
|
116 |
if (bms->shared_base) {
|
|
117 |
while (cur_sector < bms->total_sectors &&
|
|
118 |
!bdrv_is_allocated(bms->bs, cur_sector,
|
|
119 |
MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
|
112 |
120 |
cur_sector += nr_sectors;
|
113 |
121 |
}
|
114 |
122 |
}
|
115 |
|
|
116 |
|
if(cur_sector >= total_sectors) {
|
|
123 |
|
|
124 |
if (cur_sector >= total_sectors) {
|
117 |
125 |
bms->cur_sector = total_sectors;
|
118 |
126 |
qemu_free(blk->buf);
|
119 |
127 |
qemu_free(blk);
|
120 |
128 |
return 1;
|
121 |
129 |
}
|
122 |
|
|
123 |
|
if(cur_sector >= block_mig_state->print_completion) {
|
|
130 |
|
|
131 |
if (cur_sector >= block_mig_state->print_completion) {
|
124 |
132 |
printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
|
125 |
133 |
fflush(stdout);
|
126 |
|
block_mig_state->print_completion +=
|
|
134 |
block_mig_state->print_completion +=
|
127 |
135 |
(block_mig_state->sectors_per_block * 10000);
|
128 |
136 |
}
|
129 |
|
|
|
137 |
|
130 |
138 |
/* we going to transfder BLOCK_SIZE any way even if it is not allocated */
|
131 |
139 |
nr_sectors = block_mig_state->sectors_per_block;
|
132 |
140 |
|
133 |
141 |
cur_sector &= ~((int64_t)block_mig_state->sectors_per_block -1);
|
134 |
|
|
135 |
|
if(total_sectors - cur_sector < block_mig_state->sectors_per_block) {
|
|
142 |
|
|
143 |
if (total_sectors - cur_sector < block_mig_state->sectors_per_block) {
|
136 |
144 |
nr_sectors = (total_sectors - cur_sector);
|
137 |
145 |
}
|
138 |
|
|
|
146 |
|
139 |
147 |
bms->cur_sector = cur_sector + nr_sectors;
|
140 |
148 |
blk->sector = cur_sector;
|
141 |
149 |
blk->bmds = bms;
|
142 |
150 |
blk->next = NULL;
|
143 |
|
|
|
151 |
|
144 |
152 |
blk->iov.iov_base = blk->buf;
|
145 |
153 |
blk->iov.iov_len = nr_sectors * SECTOR_SIZE;
|
146 |
154 |
qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
|
147 |
|
|
|
155 |
|
148 |
156 |
blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
|
149 |
157 |
nr_sectors, blk_mig_read_cb, blk);
|
150 |
|
|
151 |
|
if(!blk->aiocb) {
|
|
158 |
|
|
159 |
if (!blk->aiocb) {
|
152 |
160 |
printf("Error reading sector %" PRId64 "\n", cur_sector);
|
153 |
161 |
qemu_free(blk->buf);
|
154 |
162 |
qemu_free(blk);
|
... | ... | |
157 |
165 |
|
158 |
166 |
bdrv_reset_dirty(bms->bs, cur_sector, nr_sectors);
|
159 |
167 |
block_mig_state->submitted++;
|
160 |
|
|
|
168 |
|
161 |
169 |
return (bms->cur_sector >= total_sectors);
|
162 |
170 |
}
|
163 |
171 |
|
164 |
172 |
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
|
165 |
|
{
|
|
173 |
{
|
166 |
174 |
int len, nr_sectors;
|
167 |
175 |
int64_t total_sectors = bmds->total_sectors, cur_sector = 0;
|
168 |
176 |
uint8_t *tmp_buf = NULL;
|
169 |
177 |
BlockDriverState *bs = bmds->bs;
|
170 |
178 |
|
171 |
179 |
tmp_buf = qemu_malloc(BLOCK_SIZE);
|
172 |
|
|
|
180 |
|
173 |
181 |
cur_sector = bmds->cur_sector;
|
174 |
|
|
175 |
|
if(bmds->shared_base) {
|
176 |
|
while(cur_sector < bmds->total_sectors &&
|
177 |
|
!bdrv_is_allocated(bmds->bs, cur_sector,
|
178 |
|
MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
|
|
182 |
|
|
183 |
if (bmds->shared_base) {
|
|
184 |
while (cur_sector < bmds->total_sectors &&
|
|
185 |
!bdrv_is_allocated(bmds->bs, cur_sector,
|
|
186 |
MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
|
179 |
187 |
cur_sector += nr_sectors;
|
180 |
188 |
}
|
181 |
189 |
}
|
182 |
|
|
183 |
|
if(cur_sector >= total_sectors) {
|
|
190 |
|
|
191 |
if (cur_sector >= total_sectors) {
|
184 |
192 |
bmds->cur_sector = total_sectors;
|
185 |
193 |
qemu_free(tmp_buf);
|
186 |
194 |
return 1;
|
187 |
195 |
}
|
188 |
|
|
189 |
|
if(cur_sector >= block_mig_state->print_completion) {
|
|
196 |
|
|
197 |
if (cur_sector >= block_mig_state->print_completion) {
|
190 |
198 |
printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
|
191 |
199 |
fflush(stdout);
|
192 |
|
block_mig_state->print_completion +=
|
|
200 |
block_mig_state->print_completion +=
|
193 |
201 |
(block_mig_state->sectors_per_block * 10000);
|
194 |
202 |
}
|
195 |
|
|
|
203 |
|
196 |
204 |
cur_sector &= ~((int64_t)block_mig_state->sectors_per_block -1);
|
197 |
|
|
198 |
|
/* we going to transfer
|
199 |
|
BLOCK_SIZE
|
200 |
|
any way even if it is not allocated */
|
|
205 |
|
|
206 |
/* we going to transfer BLOCK_SIZE any way even if it is not allocated */
|
201 |
207 |
nr_sectors = block_mig_state->sectors_per_block;
|
202 |
|
|
203 |
|
if(total_sectors - cur_sector < block_mig_state->sectors_per_block) {
|
|
208 |
|
|
209 |
if (total_sectors - cur_sector < block_mig_state->sectors_per_block) {
|
204 |
210 |
nr_sectors = (total_sectors - cur_sector);
|
205 |
211 |
}
|
206 |
|
|
207 |
|
if(bdrv_read(bs, cur_sector, tmp_buf, nr_sectors) < 0) {
|
|
212 |
|
|
213 |
if (bdrv_read(bs, cur_sector, tmp_buf, nr_sectors) < 0) {
|
208 |
214 |
printf("Error reading sector %" PRId64 "\n", cur_sector);
|
209 |
215 |
}
|
210 |
216 |
|
211 |
217 |
bdrv_reset_dirty(bs, cur_sector, nr_sectors);
|
212 |
|
|
213 |
|
/* Device name */
|
214 |
|
qemu_put_be64(f,(cur_sector << SECTOR_BITS) | BLK_MIG_FLAG_DEVICE_BLOCK);
|
215 |
|
|
|
218 |
|
|
219 |
/* sector number and flags */
|
|
220 |
qemu_put_be64(f, (cur_sector << SECTOR_BITS) | BLK_MIG_FLAG_DEVICE_BLOCK);
|
|
221 |
|
|
222 |
/* device name */
|
216 |
223 |
len = strlen(bs->device_name);
|
217 |
224 |
qemu_put_byte(f, len);
|
218 |
225 |
qemu_put_buffer(f, (uint8_t *)bs->device_name, len);
|
219 |
|
|
220 |
|
qemu_put_buffer(f, tmp_buf,
|
221 |
|
BLOCK_SIZE);
|
222 |
|
|
|
226 |
|
|
227 |
qemu_put_buffer(f, tmp_buf, BLOCK_SIZE);
|
|
228 |
|
223 |
229 |
bmds->cur_sector = cur_sector + block_mig_state->sectors_per_block;
|
224 |
|
|
|
230 |
|
225 |
231 |
qemu_free(tmp_buf);
|
226 |
|
|
|
232 |
|
227 |
233 |
return (bmds->cur_sector >= total_sectors);
|
228 |
234 |
}
|
229 |
235 |
|
230 |
236 |
static void send_blk(QEMUFile *f, BlkMigBlock * blk)
|
231 |
237 |
{
|
232 |
238 |
int len;
|
233 |
|
|
234 |
|
/* Device name */
|
235 |
|
qemu_put_be64(f,(blk->sector << SECTOR_BITS) | BLK_MIG_FLAG_DEVICE_BLOCK);
|
236 |
|
|
|
239 |
|
|
240 |
/* sector number and flags */
|
|
241 |
qemu_put_be64(f, (blk->sector << SECTOR_BITS) | BLK_MIG_FLAG_DEVICE_BLOCK);
|
|
242 |
|
|
243 |
/* device name */
|
237 |
244 |
len = strlen(blk->bmds->bs->device_name);
|
238 |
245 |
qemu_put_byte(f, len);
|
239 |
246 |
qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);
|
240 |
|
|
241 |
|
qemu_put_buffer(f, blk->buf,
|
242 |
|
BLOCK_SIZE);
|
243 |
|
|
244 |
|
return;
|
|
247 |
|
|
248 |
qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
|
245 |
249 |
}
|
246 |
250 |
|
247 |
251 |
static void blk_mig_save_dev_info(QEMUFile *f, BlkMigDevState *bmds)
|
... | ... | |
251 |
255 |
static void set_dirty_tracking(int enable)
|
252 |
256 |
{
|
253 |
257 |
BlkMigDevState *bmds;
|
254 |
|
for(bmds = block_mig_state->bmds_first; bmds != NULL; bmds = bmds->next) {
|
255 |
|
bdrv_set_dirty_tracking(bmds->bs,enable);
|
|
258 |
for (bmds = block_mig_state->bmds_first; bmds != NULL; bmds = bmds->next) {
|
|
259 |
bdrv_set_dirty_tracking(bmds->bs, enable);
|
256 |
260 |
}
|
257 |
|
|
258 |
|
return;
|
259 |
261 |
}
|
260 |
262 |
|
261 |
263 |
static void init_blk_migration(QEMUFile *f)
|
262 |
264 |
{
|
263 |
265 |
BlkMigDevState **pbmds, *bmds;
|
264 |
266 |
BlockDriverState *bs;
|
265 |
|
|
|
267 |
|
266 |
268 |
for (bs = bdrv_first; bs != NULL; bs = bs->next) {
|
267 |
|
if(bs->type == BDRV_TYPE_HD) {
|
|
269 |
if (bs->type == BDRV_TYPE_HD) {
|
268 |
270 |
bmds = qemu_mallocz(sizeof(BlkMigDevState));
|
269 |
271 |
bmds->bs = bs;
|
270 |
272 |
bmds->bulk_completed = 0;
|
271 |
273 |
bmds->total_sectors = bdrv_getlength(bs) >> SECTOR_BITS;
|
272 |
274 |
bmds->shared_base = block_mig_state->shared_base;
|
273 |
|
|
274 |
|
if(bmds->shared_base) {
|
275 |
|
printf("Start migration for %s with shared base image\n",
|
|
275 |
|
|
276 |
if (bmds->shared_base) {
|
|
277 |
printf("Start migration for %s with shared base image\n",
|
276 |
278 |
bs->device_name);
|
277 |
279 |
} else {
|
278 |
280 |
printf("Start full migration for %s\n", bs->device_name);
|
279 |
281 |
}
|
280 |
|
|
|
282 |
|
281 |
283 |
/* insert at the end */
|
282 |
284 |
pbmds = &block_mig_state->bmds_first;
|
283 |
|
while (*pbmds != NULL)
|
|
285 |
while (*pbmds != NULL) {
|
284 |
286 |
pbmds = &(*pbmds)->next;
|
|
287 |
}
|
285 |
288 |
*pbmds = bmds;
|
286 |
|
|
|
289 |
|
287 |
290 |
blk_mig_save_dev_info(f, bmds);
|
288 |
|
|
289 |
291 |
}
|
290 |
|
}
|
291 |
|
|
|
292 |
}
|
|
293 |
|
292 |
294 |
block_mig_state->sectors_per_block = bdrv_get_sectors_per_chunk();
|
293 |
|
|
294 |
|
return;
|
295 |
295 |
}
|
296 |
296 |
|
297 |
297 |
static int blk_mig_save_bulked_block(QEMUFile *f, int is_async)
|
... | ... | |
299 |
299 |
BlkMigDevState *bmds;
|
300 |
300 |
|
301 |
301 |
for (bmds = block_mig_state->bmds_first; bmds != NULL; bmds = bmds->next) {
|
302 |
|
if(bmds->bulk_completed == 0) {
|
303 |
|
if(is_async) {
|
304 |
|
if(mig_read_device_bulk(f, bmds) == 1) {
|
|
302 |
if (bmds->bulk_completed == 0) {
|
|
303 |
if (is_async) {
|
|
304 |
if (mig_read_device_bulk(f, bmds) == 1) {
|
305 |
305 |
/* completed bulk section for this device */
|
306 |
306 |
bmds->bulk_completed = 1;
|
307 |
307 |
}
|
308 |
308 |
} else {
|
309 |
|
if(mig_save_device_bulk(f,bmds) == 1) {
|
|
309 |
if (mig_save_device_bulk(f, bmds) == 1) {
|
310 |
310 |
/* completed bulk section for this device */
|
311 |
311 |
bmds->bulk_completed = 1;
|
312 |
312 |
}
|
... | ... | |
314 |
314 |
return 1;
|
315 |
315 |
}
|
316 |
316 |
}
|
317 |
|
|
|
317 |
|
318 |
318 |
/* we reached here means bulk is completed */
|
319 |
319 |
block_mig_state->bulk_completed = 1;
|
320 |
|
|
|
320 |
|
321 |
321 |
return 0;
|
322 |
|
|
323 |
322 |
}
|
324 |
323 |
|
325 |
324 |
#define MAX_NUM_BLOCKS 4
|
... | ... | |
330 |
329 |
uint8_t buf[BLOCK_SIZE];
|
331 |
330 |
int64_t sector;
|
332 |
331 |
int len;
|
333 |
|
|
334 |
|
for(bmds = block_mig_state->bmds_first; bmds != NULL; bmds = bmds->next) {
|
335 |
|
for(sector = 0; sector < bmds->cur_sector;) {
|
336 |
|
|
337 |
|
if(bdrv_get_dirty(bmds->bs,sector)) {
|
338 |
|
|
339 |
|
if(bdrv_read(bmds->bs, sector, buf,
|
340 |
|
block_mig_state->sectors_per_block) < 0) {
|
|
332 |
|
|
333 |
for (bmds = block_mig_state->bmds_first; bmds != NULL; bmds = bmds->next) {
|
|
334 |
for (sector = 0; sector < bmds->cur_sector;) {
|
|
335 |
if (bdrv_get_dirty(bmds->bs, sector)) {
|
|
336 |
if (bdrv_read(bmds->bs, sector, buf,
|
|
337 |
block_mig_state->sectors_per_block) < 0) {
|
|
338 |
/* FIXME: add error handling */
|
341 |
339 |
}
|
342 |
|
|
|
340 |
|
|
341 |
/* sector number and flags */
|
|
342 |
qemu_put_be64(f, (sector << SECTOR_BITS)
|
|
343 |
| BLK_MIG_FLAG_DEVICE_BLOCK);
|
|
344 |
|
343 |
345 |
/* device name */
|
344 |
|
qemu_put_be64(f,(sector << SECTOR_BITS)
|
345 |
|
| BLK_MIG_FLAG_DEVICE_BLOCK);
|
346 |
|
|
347 |
346 |
len = strlen(bmds->bs->device_name);
|
348 |
|
|
349 |
347 |
qemu_put_byte(f, len);
|
350 |
348 |
qemu_put_buffer(f, (uint8_t *)bmds->bs->device_name, len);
|
351 |
|
|
352 |
|
qemu_put_buffer(f, buf,
|
353 |
|
(block_mig_state->sectors_per_block *
|
|
349 |
|
|
350 |
qemu_put_buffer(f, buf,
|
|
351 |
(block_mig_state->sectors_per_block *
|
354 |
352 |
SECTOR_SIZE));
|
355 |
|
|
356 |
|
bdrv_reset_dirty(bmds->bs, sector,
|
|
353 |
|
|
354 |
bdrv_reset_dirty(bmds->bs, sector,
|
357 |
355 |
block_mig_state->sectors_per_block);
|
358 |
|
|
359 |
|
sector += block_mig_state->sectors_per_block;
|
360 |
|
} else {
|
361 |
|
/* sector is clean */
|
362 |
|
sector += block_mig_state->sectors_per_block;
|
363 |
|
}
|
|
356 |
}
|
|
357 |
sector += block_mig_state->sectors_per_block;
|
364 |
358 |
}
|
365 |
359 |
}
|
366 |
|
|
367 |
|
return;
|
368 |
360 |
}
|
369 |
361 |
|
370 |
362 |
static void flush_blks(QEMUFile* f)
|
371 |
363 |
{
|
372 |
|
BlkMigBlock *blk, *tmp;
|
373 |
|
|
374 |
|
dprintf("%s Enter submitted %d read_done %d transfered\n", __FUNCTION__,
|
|
364 |
BlkMigBlock *blk, *next;
|
|
365 |
|
|
366 |
dprintf("%s Enter submitted %d read_done %d transfered\n", __FUNCTION__,
|
375 |
367 |
submitted, read_done, transfered);
|
376 |
|
|
377 |
|
for(blk = block_mig_state->first_blk;
|
378 |
|
blk != NULL && !qemu_file_rate_limit(f); blk = tmp) {
|
|
368 |
|
|
369 |
for (blk = block_mig_state->first_blk;
|
|
370 |
blk != NULL && !qemu_file_rate_limit(f);
|
|
371 |
blk = next) {
|
379 |
372 |
send_blk(f, blk);
|
380 |
|
|
381 |
|
tmp = blk->next;
|
|
373 |
|
|
374 |
next = blk->next;
|
382 |
375 |
qemu_free(blk->buf);
|
383 |
376 |
qemu_free(blk);
|
384 |
|
|
|
377 |
|
385 |
378 |
block_mig_state->read_done--;
|
386 |
379 |
block_mig_state->transferred++;
|
387 |
380 |
assert(block_mig_state->read_done >= 0);
|
388 |
381 |
}
|
389 |
382 |
block_mig_state->first_blk = blk;
|
390 |
|
|
391 |
|
if(block_mig_state->first_blk == NULL) {
|
|
383 |
|
|
384 |
if (block_mig_state->first_blk == NULL) {
|
392 |
385 |
block_mig_state->last_blk = NULL;
|
393 |
386 |
}
|
394 |
387 |
|
395 |
|
dprintf("%s Exit submitted %d read_done %d transferred%d\n", __FUNCTION__,
|
396 |
|
block_mig_state->submitted, block_mig_state->read_done,
|
|
388 |
dprintf("%s Exit submitted %d read_done %d transferred%d\n", __FUNCTION__,
|
|
389 |
block_mig_state->submitted, block_mig_state->read_done,
|
397 |
390 |
block_mig_state->transferred);
|
398 |
|
|
399 |
|
return;
|
400 |
391 |
}
|
401 |
392 |
|
402 |
393 |
static int is_stage2_completed(void)
|
403 |
394 |
{
|
404 |
395 |
BlkMigDevState *bmds;
|
405 |
|
|
406 |
|
if(block_mig_state->submitted > 0) {
|
|
396 |
|
|
397 |
if (block_mig_state->submitted > 0) {
|
407 |
398 |
return 0;
|
408 |
399 |
}
|
409 |
|
|
|
400 |
|
410 |
401 |
for (bmds = block_mig_state->bmds_first; bmds != NULL; bmds = bmds->next) {
|
411 |
|
if(bmds->bulk_completed == 0) {
|
|
402 |
if (bmds->bulk_completed == 0) {
|
412 |
403 |
return 0;
|
413 |
404 |
}
|
414 |
405 |
}
|
415 |
|
|
|
406 |
|
416 |
407 |
return 1;
|
417 |
408 |
}
|
418 |
409 |
|
419 |
410 |
static int block_save_live(QEMUFile *f, int stage, void *opaque)
|
420 |
411 |
{
|
421 |
|
int ret = 1;
|
422 |
|
|
423 |
|
dprintf("Enter save live stage %d submitted %d transferred %d\n", stage,
|
|
412 |
dprintf("Enter save live stage %d submitted %d transferred %d\n", stage,
|
424 |
413 |
submitted, transferred);
|
425 |
|
|
426 |
|
if(block_mig_state->blk_enable != 1) {
|
|
414 |
|
|
415 |
if (block_mig_state->blk_enable != 1) {
|
427 |
416 |
/* no need to migrate storage */
|
428 |
|
|
429 |
|
qemu_put_be64(f,BLK_MIG_FLAG_EOS);
|
|
417 |
qemu_put_be64(f, BLK_MIG_FLAG_EOS);
|
430 |
418 |
return 1;
|
431 |
419 |
}
|
432 |
|
|
433 |
|
if(stage == 1) {
|
|
420 |
|
|
421 |
if (stage == 1) {
|
434 |
422 |
init_blk_migration(f);
|
435 |
|
|
|
423 |
|
436 |
424 |
/* start track dirty blocks */
|
437 |
425 |
set_dirty_tracking(1);
|
438 |
|
|
439 |
426 |
}
|
440 |
427 |
|
441 |
428 |
flush_blks(f);
|
442 |
|
|
|
429 |
|
443 |
430 |
/* control the rate of transfer */
|
444 |
|
while ((block_mig_state->submitted + block_mig_state->read_done) *
|
445 |
|
(BLOCK_SIZE) <
|
446 |
|
(qemu_file_get_rate_limit(f))) {
|
447 |
|
|
448 |
|
ret = blk_mig_save_bulked_block(f, 1);
|
449 |
|
|
450 |
|
if (ret == 0) /* no more bulk blocks for now*/
|
|
431 |
while ((block_mig_state->submitted +
|
|
432 |
block_mig_state->read_done) * BLOCK_SIZE <
|
|
433 |
qemu_file_get_rate_limit(f)) {
|
|
434 |
if (blk_mig_save_bulked_block(f, 1) == 0) {
|
|
435 |
/* no more bulk blocks for now */
|
451 |
436 |
break;
|
|
437 |
}
|
452 |
438 |
}
|
453 |
|
|
|
439 |
|
454 |
440 |
flush_blks(f);
|
455 |
|
|
456 |
|
if(stage == 3) {
|
457 |
|
|
458 |
|
while(blk_mig_save_bulked_block(f, 0) != 0);
|
459 |
|
|
|
441 |
|
|
442 |
if (stage == 3) {
|
|
443 |
while (blk_mig_save_bulked_block(f, 0) != 0) {
|
|
444 |
/* empty */
|
|
445 |
}
|
|
446 |
|
460 |
447 |
blk_mig_save_dirty_blocks(f);
|
461 |
|
|
|
448 |
|
462 |
449 |
/* stop track dirty blocks */
|
463 |
|
set_dirty_tracking(0);;
|
464 |
|
|
465 |
|
printf("\nBlock migration completed\n");
|
|
450 |
set_dirty_tracking(0);
|
|
451 |
|
|
452 |
printf("\nBlock migration completed\n");
|
466 |
453 |
}
|
467 |
|
|
468 |
|
qemu_put_be64(f,BLK_MIG_FLAG_EOS);
|
469 |
|
|
|
454 |
|
|
455 |
qemu_put_be64(f, BLK_MIG_FLAG_EOS);
|
|
456 |
|
470 |
457 |
return ((stage == 2) && is_stage2_completed());
|
471 |
458 |
}
|
472 |
459 |
|
... | ... | |
477 |
464 |
int64_t addr;
|
478 |
465 |
BlockDriverState *bs;
|
479 |
466 |
uint8_t *buf;
|
480 |
|
|
|
467 |
|
481 |
468 |
block_mig_state->sectors_per_block = bdrv_get_sectors_per_chunk();
|
482 |
469 |
buf = qemu_malloc(BLOCK_SIZE);
|
483 |
|
|
|
470 |
|
484 |
471 |
do {
|
485 |
|
|
486 |
472 |
addr = qemu_get_be64(f);
|
487 |
|
|
|
473 |
|
488 |
474 |
flags = addr & ~SECTOR_MASK;
|
489 |
475 |
addr &= SECTOR_MASK;
|
490 |
|
|
491 |
|
if(flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
|
492 |
|
|
|
476 |
|
|
477 |
if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
|
493 |
478 |
/* get device name */
|
494 |
479 |
len = qemu_get_byte(f);
|
495 |
|
|
|
480 |
|
496 |
481 |
qemu_get_buffer(f, (uint8_t *)device_name, len);
|
497 |
482 |
device_name[len] = '\0';
|
498 |
|
|
|
483 |
|
499 |
484 |
bs = bdrv_find(device_name);
|
500 |
|
|
501 |
|
qemu_get_buffer(f, buf,
|
502 |
|
BLOCK_SIZE);
|
503 |
|
if(bs != NULL) {
|
504 |
|
|
505 |
|
bdrv_write(bs, (addr >> SECTOR_BITS),
|
|
485 |
|
|
486 |
qemu_get_buffer(f, buf, BLOCK_SIZE);
|
|
487 |
if (bs != NULL) {
|
|
488 |
bdrv_write(bs, (addr >> SECTOR_BITS),
|
506 |
489 |
buf, block_mig_state->sectors_per_block);
|
507 |
490 |
} else {
|
508 |
491 |
printf("Error unknown block device %s\n", device_name);
|
|
492 |
/* FIXME: add error handling */
|
509 |
493 |
}
|
510 |
|
} else if(flags & BLK_MIG_FLAG_EOS) {
|
511 |
|
|
512 |
|
} else {
|
|
494 |
} else if (!(flags & BLK_MIG_FLAG_EOS)) {
|
513 |
495 |
printf("Unknown flags\n");
|
|
496 |
/* FIXME: add error handling */
|
514 |
497 |
}
|
515 |
|
} while(!(flags & BLK_MIG_FLAG_EOS));
|
516 |
|
|
|
498 |
} while (!(flags & BLK_MIG_FLAG_EOS));
|
|
499 |
|
517 |
500 |
qemu_free(buf);
|
518 |
501 |
|
519 |
502 |
return 0;
|
... | ... | |
525 |
508 |
|
526 |
509 |
block_mig_state->blk_enable = blk_enable;
|
527 |
510 |
block_mig_state->shared_base = shared_base;
|
528 |
|
|
|
511 |
|
529 |
512 |
/* shared base means that blk_enable = 1 */
|
530 |
513 |
block_mig_state->blk_enable |= shared_base;
|
531 |
|
|
532 |
|
return;
|
533 |
514 |
}
|
534 |
515 |
|
535 |
516 |
void blk_mig_info(void)
|
536 |
517 |
{
|
537 |
518 |
BlockDriverState *bs;
|
538 |
|
|
|
519 |
|
539 |
520 |
for (bs = bdrv_first; bs != NULL; bs = bs->next) {
|
540 |
521 |
printf("Device %s\n", bs->device_name);
|
541 |
|
if(bs->type == BDRV_TYPE_HD) {
|
542 |
|
printf("device %s format %s\n",
|
|
522 |
if (bs->type == BDRV_TYPE_HD) {
|
|
523 |
printf("device %s format %s\n",
|
543 |
524 |
bs->device_name, bs->drv->format_name);
|
544 |
525 |
}
|
545 |
526 |
}
|
546 |
527 |
}
|
547 |
528 |
|
548 |
529 |
void blk_mig_init(void)
|
549 |
|
{
|
550 |
|
|
|
530 |
{
|
551 |
531 |
block_mig_state = qemu_mallocz(sizeof(BlkMigState));
|
552 |
|
|
553 |
|
register_savevm_live("block", 0, 1, block_set_params, block_save_live,
|
554 |
|
NULL, block_load, block_mig_state);
|
555 |
532 |
|
556 |
|
|
|
533 |
register_savevm_live("block", 0, 1, block_set_params, block_save_live,
|
|
534 |
NULL, block_load, block_mig_state);
|
557 |
535 |
}
|