/*
 * NOTE(review): this file was exported from a repository web viewer
 * (path: root/dma-helpers.c, revision b42ec42d); the viewer's
 * "Statistics / Branch / History / Annotate / Download" chrome has
 * been folded into this comment so the file parses as C.
 */

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */
#include "dma.h"
#include "block_int.h"
/*
 * Initialize a scatter/gather list, pre-allocating room for
 * alloc_hint entries.  The list starts out empty: no entries and a
 * total byte size of zero.
 */
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->nsg = 0;
    qsg->size = 0;
    qsg->nalloc = alloc_hint;
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
}
/*
 * Append one (base, len) region to the scatter/gather list, growing
 * the backing array geometrically when it is full, and account for
 * the new bytes in qsg->size.
 */
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    ScatterGatherEntry *entry;

    if (qsg->nsg == qsg->nalloc) {
        /* Roughly double; the "+ 1" copes with a zero-sized allocation. */
        qsg->nalloc = qsg->nalloc * 2 + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }

    entry = &qsg->sg[qsg->nsg++];
    entry->base = base;
    entry->len = len;
    qsg->size += len;
}
/*
 * Release the memory backing a scatter/gather list.
 *
 * The entry array is freed and the bookkeeping fields are reset, so a
 * QEMUSGList that remains in the caller's scope after destruction
 * cannot lead to a double free or use-after-free (qemu_free(NULL) is
 * a no-op, so calling destroy twice is now harmless).
 */
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
    qsg->sg = NULL;
    qsg->nsg = 0;
    qsg->nalloc = 0;
    qsg->size = 0;
}
/*
 * Per-request state for a scatter/gather DMA transfer driven through
 * the block layer.  One DMABlockState tracks a single
 * dma_bdrv_read/dma_bdrv_write request across the (possibly many)
 * partial bdrv_aio_readv/bdrv_aio_writev submissions needed to cover
 * the whole s/g list.
 */
typedef struct {
    BlockDriverState *bs;           /* block device being read/written */
    BlockDriverAIOCB *acb;          /* AIOCB returned to the caller */
    QEMUSGList *sg;                 /* guest-physical scatter/gather list */
    uint64_t sector_num;            /* next sector to transfer */
    int is_write;                   /* non-zero: guest memory -> device */
    int sg_cur_index;               /* s/g entry currently being mapped */
    target_phys_addr_t sg_cur_byte; /* byte offset within that entry */
    QEMUIOVector iov;               /* host mappings for the in-flight chunk */
    QEMUBH *bh;                     /* bottom half used to retry after a
                                       cpu_physical_memory_map() failure */
} DMABlockState;

static void dma_bdrv_cb(void *opaque, int ret);
static void reschedule_dma(void *opaque)
54 59a703eb aliguori
{
55 59a703eb aliguori
    DMABlockState *dbs = (DMABlockState *)opaque;
56 59a703eb aliguori
57 59a703eb aliguori
    qemu_bh_delete(dbs->bh);
58 59a703eb aliguori
    dbs->bh = NULL;
59 59a703eb aliguori
    dma_bdrv_cb(opaque, 0);
60 59a703eb aliguori
}
61 59a703eb aliguori
62 59a703eb aliguori
static void continue_after_map_failure(void *opaque)
63 59a703eb aliguori
{
64 59a703eb aliguori
    DMABlockState *dbs = (DMABlockState *)opaque;
65 59a703eb aliguori
66 59a703eb aliguori
    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
67 59a703eb aliguori
    qemu_bh_schedule(dbs->bh);
68 59a703eb aliguori
}
69 59a703eb aliguori
70 59a703eb aliguori
/*
 * Core DMA state machine.  Used both as the AIO completion callback
 * for each submitted chunk and to kick off a transfer (called with
 * ret == 0 from dma_bdrv_io / reschedule_dma).
 *
 * Each invocation: unmaps the host buffers of the chunk that just
 * completed and advances sector_num past it; then either finishes the
 * request (s/g list exhausted, or ret < 0 from the block layer) or
 * maps as much of the remaining s/g list as possible and submits the
 * next bdrv_aio_readv/writev.  If nothing at all could be mapped, it
 * registers a map client and waits for continue_after_map_failure()
 * to reschedule it.
 */
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMABlockState *dbs = (DMABlockState *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;
    int i;

    /* Account for the completed chunk (512-byte sectors) and unmap its
     * host buffers.  On the very first call iov is empty, so this is a
     * no-op. */
    dbs->sector_num += dbs->iov.size / 512;
    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);

    /* Whole s/g list transferred, or an I/O error: deliver the final
     * status to the caller's callback and free all per-request state. */
    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->acb->cb(dbs->acb->opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs->acb);
        qemu_free(dbs);
        return;
    }

    /* Map as much of the remaining s/g list as possible, building the
     * iovec for the next submission.  cpu_physical_memory_map() takes
     * cur_len by pointer and may map less than requested, which is why
     * sg_cur_byte tracks partial progress within one entry. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    /* Nothing could be mapped at all: park the request until mapping
     * resources are released, then retry via reschedule_dma(). */
    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    /* Submit the next chunk; this function is re-entered on completion. */
    if (dbs->is_write) {
        bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                        dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                       dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
}
static BlockDriverAIOCB *dma_bdrv_io(
122 59a703eb aliguori
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
123 59a703eb aliguori
    BlockDriverCompletionFunc *cb, void *opaque,
124 59a703eb aliguori
    int is_write)
125 59a703eb aliguori
{
126 59a703eb aliguori
    DMABlockState *dbs = qemu_malloc(sizeof(*dbs));
127 59a703eb aliguori
128 59a703eb aliguori
    dbs->bs = bs;
129 59a703eb aliguori
    dbs->acb = qemu_aio_get(bs, cb, opaque);
130 59a703eb aliguori
    dbs->sg = sg;
131 59a703eb aliguori
    dbs->sector_num = sector_num;
132 59a703eb aliguori
    dbs->sg_cur_index = 0;
133 59a703eb aliguori
    dbs->sg_cur_byte = 0;
134 59a703eb aliguori
    dbs->is_write = is_write;
135 59a703eb aliguori
    dbs->bh = NULL;
136 59a703eb aliguori
    qemu_iovec_init(&dbs->iov, sg->nsg);
137 59a703eb aliguori
    dma_bdrv_cb(dbs, 0);
138 59a703eb aliguori
    return dbs->acb;
139 59a703eb aliguori
}
140 59a703eb aliguori
141 59a703eb aliguori
142 59a703eb aliguori
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
143 59a703eb aliguori
                                QEMUSGList *sg, uint64_t sector,
144 59a703eb aliguori
                                void (*cb)(void *opaque, int ret), void *opaque)
145 59a703eb aliguori
{
146 59a703eb aliguori
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
147 59a703eb aliguori
}
148 59a703eb aliguori
149 59a703eb aliguori
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
150 59a703eb aliguori
                                 QEMUSGList *sg, uint64_t sector,
151 59a703eb aliguori
                                 void (*cb)(void *opaque, int ret), void *opaque)
152 59a703eb aliguori
{
153 59a703eb aliguori
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
154 59a703eb aliguori
}