Revision 2c3b32d2 block/qcow2-cluster.c

--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -1147,79 +1147,85 @@
     cluster_offset = 0;
     *host_offset = 0;
 
-    /*
-     * Now start gathering as many contiguous clusters as possible:
-     *
-     * 1. Check for overlaps with in-flight allocations
-     *
-     *      a) Overlap not in the first cluster -> shorten this request and let
-     *         the caller handle the rest in its next loop iteration.
-     *
-     *      b) Real overlaps of two requests. Yield and restart the search for
-     *         contiguous clusters (the situation could have changed while we
-     *         were sleeping)
-     *
-     *      c) TODO: Request starts in the same cluster as the in-flight
-     *         allocation ends. Shorten the COW of the in-fight allocation, set
-     *         cluster_offset to write to the same cluster and set up the right
-     *         synchronisation between the in-flight request and the new one.
-     */
-    cur_bytes = remaining;
-    ret = handle_dependencies(bs, start, &cur_bytes);
-    if (ret == -EAGAIN) {
-        goto again;
-    } else if (ret < 0) {
-        return ret;
-    } else {
-        /* handle_dependencies() may have decreased cur_bytes (shortened
-         * the allocations below) so that the next dependency is processed
-         * correctly during the next loop iteration. */
-    }
-
-    /*
-     * 2. Count contiguous COPIED clusters.
-     */
-    ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
-    if (ret < 0) {
-        return ret;
-    } else if (ret) {
-        if (!*host_offset) {
-            *host_offset = start_of_cluster(s, cluster_offset);
+    while (true) {
+        /*
+         * Now start gathering as many contiguous clusters as possible:
+         *
+         * 1. Check for overlaps with in-flight allocations
+         *
+         *      a) Overlap not in the first cluster -> shorten this request and
+         *         let the caller handle the rest in its next loop iteration.
+         *
+         *      b) Real overlaps of two requests. Yield and restart the search
+         *         for contiguous clusters (the situation could have changed
+         *         while we were sleeping)
+         *
+         *      c) TODO: Request starts in the same cluster as the in-flight
+         *         allocation ends. Shorten the COW of the in-fight allocation,
+         *         set cluster_offset to write to the same cluster and set up
+         *         the right synchronisation between the in-flight request and
+         *         the new one.
+         */
+        cur_bytes = remaining;
+        ret = handle_dependencies(bs, start, &cur_bytes);
+        if (ret == -EAGAIN) {
+            goto again;
+        } else if (ret < 0) {
+            return ret;
+        } else {
+            /* handle_dependencies() may have decreased cur_bytes (shortened
+             * the allocations below) so that the next dependency is processed
+             * correctly during the next loop iteration. */
         }
 
-        start           += cur_bytes;
-        remaining       -= cur_bytes;
-        cluster_offset  += cur_bytes;
+        /*
+         * 2. Count contiguous COPIED clusters.
+         */
+        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
+        if (ret < 0) {
+            return ret;
+        } else if (ret) {
+            if (!*host_offset) {
+                *host_offset = start_of_cluster(s, cluster_offset);
+            }
 
-        cur_bytes = remaining;
-    } else if (cur_bytes == 0) {
-        goto done;
-    }
+            start           += cur_bytes;
+            remaining       -= cur_bytes;
+            cluster_offset  += cur_bytes;
 
-    /* If there is something left to allocate, do that now */
-    if (remaining == 0) {
-        goto done;
-    }
+            cur_bytes = remaining;
+        } else if (cur_bytes == 0) {
+            break;
+        }
 
-    /*
-     * 3. If the request still hasn't completed, allocate new clusters,
-     *    considering any cluster_offset of steps 1c or 2.
-     */
-    ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
-    if (ret < 0) {
-        return ret;
-    } else if (ret) {
-        if (!*host_offset) {
-            *host_offset = start_of_cluster(s, cluster_offset);
+        /* If there is something left to allocate, do that now */
+        if (remaining == 0) {
+            break;
         }
 
-        start           += cur_bytes;
-        remaining       -= cur_bytes;
-        cluster_offset  += cur_bytes;
+        /*
+         * 3. If the request still hasn't completed, allocate new clusters,
+         *    considering any cluster_offset of steps 1c or 2.
+         */
+        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
+        if (ret < 0) {
+            return ret;
+        } else if (ret) {
+            if (!*host_offset) {
+                *host_offset = start_of_cluster(s, cluster_offset);
+            }
+
+            start           += cur_bytes;
+            remaining       -= cur_bytes;
+            cluster_offset  += cur_bytes;
+
+            break;
+        } else {
+            assert(cur_bytes == 0);
+            break;
+        }
     }
 
-    /* Some cleanup work */
-done:
     *num = (n_end - n_start) - (remaining >> BDRV_SECTOR_BITS);
     assert(*num > 0);
     assert(*host_offset != 0);
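The revision wraps the three gathering steps in a while (true) loop and turns the old goto done exits into break statements, so the body now advances start/remaining by whatever each handler covered and leaves the loop once nothing remains. The following stand-alone C sketch illustrates only that general gather-and-advance control-flow shape; fake_handler, the unit counts and the main() driver are invented for illustration and are not QEMU code, which uses handle_dependencies(), handle_copied() and handle_alloc() on cluster-aligned offsets.

/*
 * Minimal sketch of a gather-and-advance loop, assuming a made-up
 * fake_handler(); NOT the qcow2 implementation.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend handler: covers at most 3 units of the offered chunk, shrinks
 * *cur_bytes to what it actually covered, and returns >0 on progress,
 * 0 if it has nothing to contribute, <0 on error. */
static int fake_handler(uint64_t start, uint64_t *cur_bytes)
{
    (void)start;                        /* a real handler would use the offset */
    uint64_t covered = *cur_bytes < 3 ? *cur_bytes : 3;
    *cur_bytes = covered;
    return covered > 0 ? 1 : 0;
}

int main(void)
{
    uint64_t start = 0;
    uint64_t remaining = 10;            /* size of the whole request */
    uint64_t total = 0;
    uint64_t cur_bytes;
    int ret;

    while (true) {
        cur_bytes = remaining;          /* offer the handler the whole rest */

        ret = fake_handler(start, &cur_bytes);
        if (ret < 0) {
            return 1;                   /* error path, like "return ret" */
        } else if (ret) {
            /* Progress: advance past the part the handler covered. */
            start     += cur_bytes;
            remaining -= cur_bytes;
            total     += cur_bytes;
            if (remaining == 0) {
                break;                  /* request fully covered */
            }
        } else {
            break;                      /* no progress possible, stop here */
        }
    }

    printf("covered %" PRIu64 " of 10 units\n", total);
    assert(remaining == 0 && total == 10);
    return 0;
}

In the sketch the loop keeps iterating until the request is exhausted; in this particular revision every path through step 3 still ends in break, so the structure is in place but does not yet repeat.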
