Revision 7b88e48b

--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -332,8 +332,8 @@
     QEMUIOVector *qiov;
     uint8_t *buf;
     void *orig_buf;
-    int nb_sectors;
-    int n;
+    int remaining_sectors;
+    int cur_nr_sectors;    /* number of sectors in current iteration */
     uint64_t cluster_offset;
     uint8_t *cluster_data;
     BlockDriverAIOCB *hd_aiocb;
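A rough sketch of the bookkeeping the renamed fields imply, assuming 512-byte sectors as in the diff; the struct and helper below are illustrative only, not QEMU code:

#include <stdint.h>

/* Illustrative only: remaining_sectors tracks what is left of the whole
 * request, cur_nr_sectors the slice handled by the current AIO iteration. */
struct example_acb {
    int64_t sector_num;       /* first sector of the next iteration */
    uint8_t *buf;             /* current position in the guest buffer */
    int remaining_sectors;    /* sectors still outstanding for the request */
    int cur_nr_sectors;       /* sectors covered by this iteration */
};

/* Same advance step the read and write callbacks perform in the diff. */
static void advance_iteration(struct example_acb *acb)
{
    acb->remaining_sectors -= acb->cur_nr_sectors;
    acb->sector_num += acb->cur_nr_sectors;
    acb->buf += acb->cur_nr_sectors * 512;
}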
@@ -399,38 +399,38 @@
     } else {
         if (s->crypt_method) {
             qcow2_encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
-                            acb->n, 0,
+                            acb->cur_nr_sectors, 0,
                             &s->aes_decrypt_key);
         }
     }
 
-    acb->nb_sectors -= acb->n;
-    acb->sector_num += acb->n;
-    acb->buf += acb->n * 512;
+    acb->remaining_sectors -= acb->cur_nr_sectors;
+    acb->sector_num += acb->cur_nr_sectors;
+    acb->buf += acb->cur_nr_sectors * 512;
 
-    if (acb->nb_sectors == 0) {
+    if (acb->remaining_sectors == 0) {
         /* request completed */
         ret = 0;
         goto done;
     }
 
     /* prepare next AIO request */
-    acb->n = acb->nb_sectors;
-    acb->cluster_offset =
-        qcow2_get_cluster_offset(bs, acb->sector_num << 9, &acb->n);
+    acb->cur_nr_sectors = acb->remaining_sectors;
+    acb->cluster_offset = qcow2_get_cluster_offset(bs, acb->sector_num << 9,
+                                                   &acb->cur_nr_sectors);
     index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
 
     if (!acb->cluster_offset) {
         if (bs->backing_hd) {
             /* read from the base image */
             n1 = qcow2_backing_read1(bs->backing_hd, acb->sector_num,
-                               acb->buf, acb->n);
+                               acb->buf, acb->cur_nr_sectors);
             if (n1 > 0) {
                 acb->hd_iov.iov_base = (void *)acb->buf;
-                acb->hd_iov.iov_len = acb->n * 512;
+                acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
                 qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
                 acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num,
-                                    &acb->hd_qiov, acb->n,
+                                    &acb->hd_qiov, acb->cur_nr_sectors,
                                     qcow_aio_read_cb, acb);
                 if (acb->hd_aiocb == NULL)
                     goto done;
@@ -441,7 +441,7 @@
             }
         } else {
             /* Note: in this case, no need to wait */
-            memset(acb->buf, 0, 512 * acb->n);
+            memset(acb->buf, 0, 512 * acb->cur_nr_sectors);
             ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
             if (ret < 0)
                 goto done;
@@ -450,8 +450,8 @@
         /* add AIO support for compressed blocks ? */
         if (qcow2_decompress_cluster(s, acb->cluster_offset) < 0)
             goto done;
-        memcpy(acb->buf,
-               s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
+        memcpy(acb->buf, s->cluster_cache + index_in_cluster * 512,
+               512 * acb->cur_nr_sectors);
         ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
         if (ret < 0)
             goto done;
@@ -462,11 +462,12 @@
         }
 
         acb->hd_iov.iov_base = (void *)acb->buf;
-        acb->hd_iov.iov_len = acb->n * 512;
+        acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
         qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
         acb->hd_aiocb = bdrv_aio_readv(s->hd,
                             (acb->cluster_offset >> 9) + index_in_cluster,
-                            &acb->hd_qiov, acb->n, qcow_aio_read_cb, acb);
+                            &acb->hd_qiov, acb->cur_nr_sectors,
+                            qcow_aio_read_cb, acb);
         if (acb->hd_aiocb == NULL)
             goto done;
     }
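In the read path above, cur_nr_sectors is re-derived each iteration by qcow2_get_cluster_offset(), which shrinks it to the contiguous run it can map. The toy helper below only models the simplest bound on such an iteration, the end of the current cluster, assuming the default 64 KiB cluster size; it is not the driver's code:

#include <stdint.h>

enum { EXAMPLE_CLUSTER_SECTORS = 128 };  /* 64 KiB cluster / 512-byte sectors */

/* Illustrative clamp: one iteration can never be larger than what is left
 * of the request, nor than the distance to the next cluster boundary. */
static int example_clamp_iteration(int64_t sector_num, int remaining_sectors)
{
    int index_in_cluster = sector_num & (EXAMPLE_CLUSTER_SECTORS - 1);
    int room = EXAMPLE_CLUSTER_SECTORS - index_in_cluster;
    return remaining_sectors < room ? remaining_sectors : room;
}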
@@ -500,8 +501,8 @@
     } else {
         acb->buf = (uint8_t *)qiov->iov->iov_base;
     }
-    acb->nb_sectors = nb_sectors;
-    acb->n = 0;
+    acb->remaining_sectors = nb_sectors;
+    acb->cur_nr_sectors = 0;
     acb->cluster_offset = 0;
     acb->l2meta.nb_clusters = 0;
     QLIST_INIT(&acb->l2meta.dependent_requests);
@@ -569,24 +570,24 @@
     if (ret < 0)
         goto done;
 
-    acb->nb_sectors -= acb->n;
-    acb->sector_num += acb->n;
-    acb->buf += acb->n * 512;
+    acb->remaining_sectors -= acb->cur_nr_sectors;
+    acb->sector_num += acb->cur_nr_sectors;
+    acb->buf += acb->cur_nr_sectors * 512;
 
-    if (acb->nb_sectors == 0) {
+    if (acb->remaining_sectors == 0) {
         /* request completed */
         ret = 0;
         goto done;
     }
 
     index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
-    n_end = index_in_cluster + acb->nb_sectors;
+    n_end = index_in_cluster + acb->remaining_sectors;
     if (s->crypt_method &&
         n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
         n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
 
     ret = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
-        index_in_cluster, n_end, &acb->n, &acb->l2meta);
+        index_in_cluster, n_end, &acb->cur_nr_sectors, &acb->l2meta);
     if (ret < 0) {
         goto done;
     }
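On the write side, n_end caps how far one iteration may extend when encryption is enabled, which keeps the encryption bounce buffer (acb->cluster_data) bounded to QCOW_MAX_CRYPT_CLUSTERS clusters. A standalone rendering of that cap, with example constants standing in for the driver's definitions:

/* Example constants, not the driver's definitions. */
enum {
    EXAMPLE_MAX_CRYPT_CLUSTERS = 32,
    EXAMPLE_CRYPT_CLUSTER_SECTORS = 128,
};

/* Mirrors the n_end clamp in the diff: limit the end of the iteration,
 * counted in sectors from the start of the current cluster. */
static int example_cap_encrypted_end(int index_in_cluster, int remaining_sectors)
{
    int n_end = index_in_cluster + remaining_sectors;
    int limit = EXAMPLE_MAX_CRYPT_CLUSTERS * EXAMPLE_CRYPT_CLUSTER_SECTORS;
    return n_end > limit ? limit : n_end;
}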
@@ -608,17 +609,17 @@
                                              s->cluster_size);
         }
         qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
-                        acb->n, 1, &s->aes_encrypt_key);
+                        acb->cur_nr_sectors, 1, &s->aes_encrypt_key);
         src_buf = acb->cluster_data;
     } else {
         src_buf = acb->buf;
     }
     acb->hd_iov.iov_base = (void *)src_buf;
-    acb->hd_iov.iov_len = acb->n * 512;
+    acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
     qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
     acb->hd_aiocb = bdrv_aio_writev(s->hd,
                                     (acb->cluster_offset >> 9) + index_in_cluster,
-                                    &acb->hd_qiov, acb->n,
+                                    &acb->hd_qiov, acb->cur_nr_sectors,
                                     qcow_aio_write_cb, acb);
     if (acb->hd_aiocb == NULL)
         goto done;
