Revision faf575c1 block/qcow2.c
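This revision removes the per-iteration counter cur_nr_sectors from the QCowAIOCB request state and makes it a local variable of the read and write coroutines. As a consequence, the post-read processing (decryption and hd_qiov bookkeeping), the L2 table linking in the write path, and the remaining_sectors/sector_num/bytes_done accounting move from the top of the following iteration to the end of the iteration that performed the I/O.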

--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -381,7 +381,6 @@
     int64_t sector_num;
     QEMUIOVector *qiov;
     int remaining_sectors;
-    int cur_nr_sectors;   /* number of sectors in current iteration */
     uint64_t bytes_done;
     uint64_t cluster_offset;
     uint8_t *cluster_data;
@@ -399,42 +398,22 @@
     BDRVQcowState *s = bs->opaque;
     int index_in_cluster, n1;
     int ret;
-
-    /* post process the read buffer */
-    if (!acb->cluster_offset) {
-        /* nothing to do */
-    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
-        /* nothing to do */
-    } else {
-        if (s->crypt_method) {
-            qcow2_encrypt_sectors(s, acb->sector_num,  acb->cluster_data,
-                acb->cluster_data, acb->cur_nr_sectors, 0, &s->aes_decrypt_key);
-            qemu_iovec_reset(&acb->hd_qiov);
-            qemu_iovec_copy(&acb->hd_qiov, acb->qiov, acb->bytes_done,
-                acb->cur_nr_sectors * 512);
-            qemu_iovec_from_buffer(&acb->hd_qiov, acb->cluster_data,
-                512 * acb->cur_nr_sectors);
-        }
-    }
-
-    acb->remaining_sectors -= acb->cur_nr_sectors;
-    acb->sector_num += acb->cur_nr_sectors;
-    acb->bytes_done += acb->cur_nr_sectors * 512;
+    int cur_nr_sectors; /* number of sectors in current iteration */
 
     if (acb->remaining_sectors == 0) {
         /* request completed */
         return 0;
     }
 
-    /* prepare next AIO request */
-    acb->cur_nr_sectors = acb->remaining_sectors;
+    /* prepare next request */
+    cur_nr_sectors = acb->remaining_sectors;
     if (s->crypt_method) {
-        acb->cur_nr_sectors = MIN(acb->cur_nr_sectors,
+        cur_nr_sectors = MIN(cur_nr_sectors,
             QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
     }
 
     ret = qcow2_get_cluster_offset(bs, acb->sector_num << 9,
-        &acb->cur_nr_sectors, &acb->cluster_offset);
+        &cur_nr_sectors, &acb->cluster_offset);
     if (ret < 0) {
         return ret;
     }
@@ -443,14 +422,14 @@
 
     qemu_iovec_reset(&acb->hd_qiov);
     qemu_iovec_copy(&acb->hd_qiov, acb->qiov, acb->bytes_done,
-        acb->cur_nr_sectors * 512);
+        cur_nr_sectors * 512);
 
     if (!acb->cluster_offset) {
 
         if (bs->backing_hd) {
             /* read from the base image */
             n1 = qcow2_backing_read1(bs->backing_hd, &acb->hd_qiov,
-                acb->sector_num, acb->cur_nr_sectors);
+                acb->sector_num, cur_nr_sectors);
             if (n1 > 0) {
                 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                 qemu_co_mutex_unlock(&s->lock);
@@ -461,11 +440,9 @@
                     return ret;
                 }
             }
-            return 1;
         } else {
             /* Note: in this case, no need to wait */
-            qemu_iovec_memset(&acb->hd_qiov, 0, 512 * acb->cur_nr_sectors);
-            return 1;
+            qemu_iovec_memset(&acb->hd_qiov, 0, 512 * cur_nr_sectors);
         }
     } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
         /* add AIO support for compressed blocks ? */
@@ -476,9 +453,7 @@
 
         qemu_iovec_from_buffer(&acb->hd_qiov,
             s->cluster_cache + index_in_cluster * 512,
-            512 * acb->cur_nr_sectors);
-
-        return 1;
+            512 * cur_nr_sectors);
     } else {
         if ((acb->cluster_offset & 511) != 0) {
             return -EIO;
@@ -494,24 +469,37 @@
                     g_malloc0(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
             }
 
-            assert(acb->cur_nr_sectors <=
+            assert(cur_nr_sectors <=
                 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
             qemu_iovec_reset(&acb->hd_qiov);
             qemu_iovec_add(&acb->hd_qiov, acb->cluster_data,
-                512 * acb->cur_nr_sectors);
+                512 * cur_nr_sectors);
         }
 
         BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
         qemu_co_mutex_unlock(&s->lock);
         ret = bdrv_co_readv(bs->file,
                             (acb->cluster_offset >> 9) + index_in_cluster,
-                            acb->cur_nr_sectors, &acb->hd_qiov);
+                            cur_nr_sectors, &acb->hd_qiov);
         qemu_co_mutex_lock(&s->lock);
         if (ret < 0) {
             return ret;
         }
+        if (s->crypt_method) {
+            qcow2_encrypt_sectors(s, acb->sector_num,  acb->cluster_data,
+                acb->cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
+            qemu_iovec_reset(&acb->hd_qiov);
+            qemu_iovec_copy(&acb->hd_qiov, acb->qiov, acb->bytes_done,
+                cur_nr_sectors * 512);
+            qemu_iovec_from_buffer(&acb->hd_qiov, acb->cluster_data,
+                512 * cur_nr_sectors);
+        }
     }
 
+    acb->remaining_sectors -= cur_nr_sectors;
+    acb->sector_num += cur_nr_sectors;
+    acb->bytes_done += cur_nr_sectors * 512;
+
     return 1;
 }
 
@@ -529,7 +517,6 @@
 
     acb->bytes_done = 0;
     acb->remaining_sectors = nb_sectors;
-    acb->cur_nr_sectors = 0;
     acb->cluster_offset = 0;
     acb->l2meta.nb_clusters = 0;
     qemu_co_queue_init(&acb->l2meta.dependent_requests);
@@ -582,18 +569,7 @@
     int index_in_cluster;
     int n_end;
     int ret;
-
-    ret = qcow2_alloc_cluster_link_l2(bs, &acb->l2meta);
-
-    run_dependent_requests(s, &acb->l2meta);
-
-    if (ret < 0) {
-        return ret;
-    }
-
-    acb->remaining_sectors -= acb->cur_nr_sectors;
-    acb->sector_num += acb->cur_nr_sectors;
-    acb->bytes_done += acb->cur_nr_sectors * 512;
+    int cur_nr_sectors; /* number of sectors in current iteration */
 
     if (acb->remaining_sectors == 0) {
         /* request completed */
@@ -607,7 +583,7 @@
         n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
 
     ret = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
-        index_in_cluster, n_end, &acb->cur_nr_sectors, &acb->l2meta);
+        index_in_cluster, n_end, &cur_nr_sectors, &acb->l2meta);
     if (ret < 0) {
         return ret;
     }
@@ -617,7 +593,7 @@
 
     qemu_iovec_reset(&acb->hd_qiov);
     qemu_iovec_copy(&acb->hd_qiov, acb->qiov, acb->bytes_done,
-        acb->cur_nr_sectors * 512);
+        cur_nr_sectors * 512);
 
     if (s->crypt_method) {
         if (!acb->cluster_data) {
@@ -629,23 +605,35 @@
         qemu_iovec_to_buffer(&acb->hd_qiov, acb->cluster_data);
 
         qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data,
-            acb->cluster_data, acb->cur_nr_sectors, 1, &s->aes_encrypt_key);
+            acb->cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key);
 
         qemu_iovec_reset(&acb->hd_qiov);
         qemu_iovec_add(&acb->hd_qiov, acb->cluster_data,
-            acb->cur_nr_sectors * 512);
+            cur_nr_sectors * 512);
     }
 
     BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
     qemu_co_mutex_unlock(&s->lock);
     ret = bdrv_co_writev(bs->file,
                          (acb->cluster_offset >> 9) + index_in_cluster,
-                         acb->cur_nr_sectors, &acb->hd_qiov);
+                         cur_nr_sectors, &acb->hd_qiov);
    qemu_co_mutex_lock(&s->lock);
    if (ret < 0) {
        return ret;
    }
 
+    ret = qcow2_alloc_cluster_link_l2(bs, &acb->l2meta);
+
+    run_dependent_requests(s, &acb->l2meta);
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    acb->remaining_sectors -= cur_nr_sectors;
+    acb->sector_num += cur_nr_sectors;
+    acb->bytes_done += cur_nr_sectors * 512;
+
     return 1;
 }
 
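For orientation, here is a minimal self-contained sketch of the loop shape this revision produces. This is illustrative toy code, not QEMU source: the names toy_acb, toy_read_one_chunk and max_chunk are invented for the example. The point it demonstrates is that the per-iteration sector count lives on the coroutine stack, and the progress counters advance only after the chunk's I/O has completed, at the end of the same iteration.

#include <stdio.h>
#include <stdint.h>

/* Toy model of the request state kept across iterations; after this
 * revision the per-iteration count is no longer part of this struct. */
struct toy_acb {
    int64_t  sector_num;
    int      remaining_sectors;
    uint64_t bytes_done;
};

/* One iteration of the coroutine loop: returns 0 when the request is
 * complete, 1 when there is more to do. */
static int toy_read_one_chunk(struct toy_acb *acb, int max_chunk)
{
    int cur_nr_sectors;   /* lives only for this iteration */

    if (acb->remaining_sectors == 0) {
        return 0;         /* request completed */
    }

    /* prepare next request (clamped, like the crypt-cluster limit) */
    cur_nr_sectors = acb->remaining_sectors;
    if (cur_nr_sectors > max_chunk) {
        cur_nr_sectors = max_chunk;
    }

    /* ...the real code looks up the cluster, does the I/O and any
     * decryption here... */
    printf("read %d sectors at %lld\n", cur_nr_sectors,
           (long long)acb->sector_num);

    /* bookkeeping now happens after the I/O, at the end of this
     * iteration, instead of at the top of the next one */
    acb->remaining_sectors -= cur_nr_sectors;
    acb->sector_num        += cur_nr_sectors;
    acb->bytes_done        += cur_nr_sectors * 512;

    return 1;
}

int main(void)
{
    struct toy_acb acb = { .sector_num = 0, .remaining_sectors = 40 };
    while (toy_read_one_chunk(&acb, 16)) {
        /* keep issuing chunks until the request is done */
    }
    printf("done, %llu bytes\n", (unsigned long long)acb.bytes_done);
    return 0;
}

In the write path the same reordering means qcow2_alloc_cluster_link_l2() and run_dependent_requests() now run immediately after bdrv_co_writev() returns, inside the same iteration, instead of being deferred to the start of the next call.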
