Revision 42fb2e07

b/hw/virtio.c
@@ -360,11 +360,26 @@
     return 0;
 }
 
+void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
+    size_t num_sg, int is_write)
+{
+    unsigned int i;
+    target_phys_addr_t len;
+
+    for (i = 0; i < num_sg; i++) {
+        len = sg[i].iov_len;
+        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
+        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
+            fprintf(stderr, "virtio: trying to map MMIO memory\n");
+            exit(1);
+        }
+    }
+}
+
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
 {
     unsigned int i, head, max;
     target_phys_addr_t desc_pa = vq->vring.desc;
-    target_phys_addr_t len;
 
     if (!virtqueue_num_heads(vq, vq->last_avail_idx))
         return 0;
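
The new virtqueue_map_sg() helper maps a guest scatter-gather list in one pass: the caller fills each sg[i].iov_len with the expected length and addr[i] with the guest-physical address, and the helper fills in sg[i].iov_base, exiting if any entry maps short or lands in MMIO space. The patch adds only the mapping side; the buffers are unmapped when the element is completed (in QEMU this happens in virtqueue_fill()). A minimal sketch of that inverse pass, assuming the device touched the whole buffer (example_unmap_sg is illustrative, not part of this patch):

/* Sketch, not part of this patch: the inverse pass a completion path
 * runs once the device has finished with the buffers.  Passing
 * access_len == iov_len assumes the device touched the whole buffer. */
static void example_unmap_sg(struct iovec *sg, size_t num_sg, int is_write)
{
    unsigned int i;

    for (i = 0; i < num_sg; i++) {
        cpu_physical_memory_unmap(sg[i].iov_base, sg[i].iov_len,
                                  is_write, sg[i].iov_len);
    }
}
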
@@ -388,28 +403,19 @@
         i = 0;
     }
 
+    /* Collect all the descriptors */
     do {
         struct iovec *sg;
-        int is_write = 0;
 
         if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
             sg = &elem->in_sg[elem->in_num++];
-            is_write = 1;
-        } else
+        } else {
+            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
             sg = &elem->out_sg[elem->out_num++];
+        }
 
-        /* Grab the first descriptor, and check it's OK. */
         sg->iov_len = vring_desc_len(desc_pa, i);
-        len = sg->iov_len;
-
-        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(desc_pa, i),
-                                               &len, is_write);
-
-        if (sg->iov_base == NULL || len != sg->iov_len) {
-            fprintf(stderr, "virtio: trying to map MMIO memory\n");
-            exit(1);
-        }
 
         /* If we've got too many, that implies a descriptor loop. */
         if ((elem->in_num + elem->out_num) > max) {
@@ -418,6 +424,10 @@
         }
     } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
 
+    /* Now map what we have collected */
+    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
+    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);
+
     elem->index = head;
 
     vq->inuse++;
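
With this restructuring, virtqueue_pop() first walks the descriptor chain recording guest addresses and lengths in elem->in_addr/out_addr, and only then maps everything in one shot. Callers see the same interface as before. A minimal sketch of a device-side consumer, assuming an output-only queue and an illustrative file descriptor 'fd' (neither is part of this patch; writev() needs <sys/uio.h>):

/* Sketch: drain an output queue by writing each popped element's
 * mapped, guest-readable out_sg buffers to a host fd. */
static void example_flush_queue(VirtIODevice *vdev, VirtQueue *vq, int fd)
{
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        /* out_sg/out_num now describe already-mapped buffers */
        writev(fd, elem.out_sg, elem.out_num);

        /* no device-to-driver data was produced, so the used length is 0 */
        virtqueue_push(vq, &elem, 0);
    }
    virtio_notify(vdev, vq);
}
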
b/hw/virtio.h
@@ -81,6 +81,7 @@
     unsigned int out_num;
     unsigned int in_num;
     target_phys_addr_t in_addr[VIRTQUEUE_MAX_SIZE];
+    target_phys_addr_t out_addr[VIRTQUEUE_MAX_SIZE];
     struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
 } VirtQueueElement;
@@ -142,6 +143,8 @@
 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len, unsigned int idx);
 
+void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
+    size_t num_sg, int is_write);
 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
 int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes);
 
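Exporting the prototype lets device code map a gather list it assembled itself. The contract mirrors the internal use: the caller supplies guest-physical addresses and expected lengths, and virtqueue_map_sg() fills in the host pointers. A sketch under that assumption; the function name and the two placeholder addresses are illustrative, not symbols from this patch:

/* Sketch: map a two-entry gather list assembled by the caller. */
static void example_map_two(target_phys_addr_t first_guest_addr,
                            target_phys_addr_t second_guest_addr)
{
    struct iovec sg[2];
    target_phys_addr_t addr[2];

    sg[0].iov_len = 512;   addr[0] = first_guest_addr;
    sg[1].iov_len = 4096;  addr[1] = second_guest_addr;

    /* is_write == 0: the buffers are guest-readable (the device reads them) */
    virtqueue_map_sg(sg, addr, 2, 0);
    /* on return, sg[i].iov_base points at host memory; the helper
     * exits if any entry maps short or lands in MMIO space */
}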
  
