Revision 6f321e93 block/qed.c
b/block/qed.c | ||
---|---|---|
12 | 12 |
* |
13 | 13 |
*/ |
14 | 14 |
|
15 |
#include "qemu-timer.h" |
|
15 | 16 |
#include "trace.h" |
16 | 17 |
#include "qed.h" |
17 | 18 |
#include "qerror.h" |
... | ... | |
291 | 292 |
|
292 | 293 |
static void qed_aio_next_io(void *opaque, int ret); |
293 | 294 |
|
295 |
/* Stop dispatching allocating write requests.
 *
 * While plugged, incoming allocating writes are queued rather than started.
 * Must not be called when already plugged.
 */
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = true;
}
|
301 |
|
|
302 |
/* Resume dispatching allocating write requests.
 *
 * If a request queued up while we were plugged, restart it now.
 * Must only be called when currently plugged.
 */
static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;

    /* Kick the first queued request, if any */
    QEDAIOCB *acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb != NULL) {
        qed_aio_next_io(acb, 0);
    }
}
|
315 |
|
|
316 |
/* Completion callback for the flush issued after clearing the need-check
 * flag.  Nothing is left to do at this point.
 */
static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}
|
320 |
|
|
321 |
static void qed_flush_after_clear_need_check(void *opaque, int ret) |
|
322 |
{ |
|
323 |
BDRVQEDState *s = opaque; |
|
324 |
|
|
325 |
bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s); |
|
326 |
|
|
327 |
/* No need to wait until flush completes */ |
|
328 |
qed_unplug_allocating_write_reqs(s); |
|
329 |
} |
|
330 |
|
|
331 |
static void qed_clear_need_check(void *opaque, int ret) |
|
332 |
{ |
|
333 |
BDRVQEDState *s = opaque; |
|
334 |
|
|
335 |
if (ret) { |
|
336 |
qed_unplug_allocating_write_reqs(s); |
|
337 |
return; |
|
338 |
} |
|
339 |
|
|
340 |
s->header.features &= ~QED_F_NEED_CHECK; |
|
341 |
qed_write_header(s, qed_flush_after_clear_need_check, s); |
|
342 |
} |
|
343 |
|
|
344 |
static void qed_need_check_timer_cb(void *opaque) |
|
345 |
{ |
|
346 |
BDRVQEDState *s = opaque; |
|
347 |
|
|
348 |
/* The timer should only fire when allocating writes have drained */ |
|
349 |
assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs)); |
|
350 |
|
|
351 |
trace_qed_need_check_timer_cb(s); |
|
352 |
|
|
353 |
qed_plug_allocating_write_reqs(s); |
|
354 |
|
|
355 |
/* Ensure writes are on disk before clearing flag */ |
|
356 |
bdrv_aio_flush(s->bs, qed_clear_need_check, s); |
|
357 |
} |
|
358 |
|
|
359 |
/* Arm the need-check timer QED_NEED_CHECK_TIMEOUT seconds from now. */
static void qed_start_need_check_timer(BDRVQEDState *s)
{
    int64_t expire_ns;

    trace_qed_start_need_check_timer(s);

    /* Use vm_clock so we don't alter the image file while suspended for
     * migration.
     */
    expire_ns = qemu_get_clock_ns(vm_clock) +
                get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT;
    qemu_mod_timer(s->need_check_timer, expire_ns);
}
|
369 |
|
|
370 |
/* It's okay to call this multiple times or when no timer is started */ |
|
371 |
static void qed_cancel_need_check_timer(BDRVQEDState *s) |
|
372 |
{ |
|
373 |
trace_qed_cancel_need_check_timer(s); |
|
374 |
qemu_del_timer(s->need_check_timer); |
|
375 |
} |
|
376 |
|
|
294 | 377 |
static int bdrv_qed_open(BlockDriverState *bs, int flags) |
295 | 378 |
{ |
296 | 379 |
BDRVQEDState *s = bs->opaque; |
... | ... | |
406 | 489 |
BdrvCheckResult result = {0}; |
407 | 490 |
|
408 | 491 |
ret = qed_check(s, &result, true); |
409 |
if (!ret && !result.corruptions && !result.check_errors) { |
|
492 |
if (ret) { |
|
493 |
goto out; |
|
494 |
} |
|
495 |
if (!result.corruptions && !result.check_errors) { |
|
410 | 496 |
/* Ensure fixes reach storage before clearing check bit */ |
411 | 497 |
bdrv_flush(s->bs); |
412 | 498 |
|
... | ... | |
416 | 502 |
} |
417 | 503 |
} |
418 | 504 |
|
505 |
s->need_check_timer = qemu_new_timer_ns(vm_clock, |
|
506 |
qed_need_check_timer_cb, s); |
|
507 |
|
|
419 | 508 |
out: |
420 | 509 |
if (ret) { |
421 | 510 |
qed_free_l2_cache(&s->l2_cache); |
... | ... | |
428 | 517 |
{ |
429 | 518 |
BDRVQEDState *s = bs->opaque; |
430 | 519 |
|
520 |
qed_cancel_need_check_timer(s); |
|
521 |
qemu_free_timer(s->need_check_timer); |
|
522 |
|
|
431 | 523 |
/* Ensure writes reach stable storage */ |
432 | 524 |
bdrv_flush(bs->file); |
433 | 525 |
|
... | ... | |
809 | 901 |
acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs); |
810 | 902 |
if (acb) { |
811 | 903 |
qed_aio_next_io(acb, 0); |
904 |
} else if (s->header.features & QED_F_NEED_CHECK) { |
|
905 |
qed_start_need_check_timer(s); |
|
812 | 906 |
} |
813 | 907 |
} |
814 | 908 |
} |
... | ... | |
1014 | 1108 |
{ |
1015 | 1109 |
BDRVQEDState *s = acb_to_s(acb); |
1016 | 1110 |
|
1111 |
/* Cancel timer when the first allocating request comes in */ |
|
1112 |
if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) { |
|
1113 |
qed_cancel_need_check_timer(s); |
|
1114 |
} |
|
1115 |
|
|
1017 | 1116 |
/* Freeze this request if another allocating write is in progress */ |
1018 | 1117 |
if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) { |
1019 | 1118 |
QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next); |
1020 | 1119 |
} |
1021 |
if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) { |
|
1120 |
if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) || |
|
1121 |
s->allocating_write_reqs_plugged) { |
|
1022 | 1122 |
return; /* wait for existing request to finish */ |
1023 | 1123 |
} |
1024 | 1124 |
|
Also available in: Unified diff