dma-helpers.c @ 8171ee35

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"
#include "trace.h"

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        /* Grow the entry array geometrically when it is full. */
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}

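/* Illustrative sketch (not part of this file): how a device model would
 * typically build and tear down a scatter-gather list around a transfer.
 * The guest-physical addresses and sizes below are made up for the
 * example. */
#if 0
static void example_build_sglist(void)
{
    QEMUSGList qsg;

    qemu_sglist_init(&qsg, 2);               /* hint: expect two entries */
    qemu_sglist_add(&qsg, 0x10000, 512);     /* first guest-physical fragment */
    qemu_sglist_add(&qsg, 0x24000, 1024);    /* second fragment */
    /* ... hand &qsg to dma_bdrv_read()/dma_bdrv_write(), and destroy it
     * from the completion callback ... */
    qemu_sglist_destroy(&qsg);
}
#endif
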
/* State of a single in-flight scatter/gather DMA request. */
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;      /* inner block-layer request in flight */
    QEMUSGList *sg;
    uint64_t sector_num;
    bool to_dev;                /* true: data flows guest memory -> device */
    bool in_cancel;             /* set while dma_aio_cancel() is running */
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

/* Called when bounce-buffer space becomes available again after a failed
 * cpu_physical_memory_map(); retry the transfer from a bottom half. */
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

/* Unmap everything mapped so far and reset the iovec for the next round. */
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->to_dev,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

/* Completion callback for each chunk of I/O: map as many remaining
 * scatter-gather entries as possible and submit the next request. */
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->to_dev);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, bool to_dev)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, to_dev);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->to_dev = to_dev;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, false);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, true);
}

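/* Illustrative sketch (not part of this file): issuing an asynchronous
 * scatter-gather read.  "MyDevice", its fields, and the completion
 * handler are hypothetical names for the example; a real device model
 * would raise its interrupt and release resources in the callback. */
#if 0
static void my_dma_done(void *opaque, int ret)
{
    MyDevice *s = opaque;

    /* ret < 0 reports an I/O error from the block layer. */
    qemu_sglist_destroy(&s->qsg);
}

static void my_start_read(MyDevice *s, uint64_t sector)
{
    s->acb = dma_bdrv_read(s->bs, &s->qsg, sector, my_dma_done, s);
}
#endif
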
/* Copy between a linear buffer and the guest memory described by an
 * sglist; returns the number of sglist bytes NOT transferred. */
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, false);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, true);
}
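
/* Illustrative sketch (not part of this file): a synchronous buffer copy,
 * e.g. completing a device read by pushing a sector buffer into guest
 * memory.  "io_buffer" and "qsg" are assumed to exist in the caller.  A
 * zero return means the buffer covered the whole sglist. */
#if 0
    uint64_t resid = dma_buf_read(io_buffer, 512, &qsg);
    if (resid) {
        /* The sglist was larger than the buffer; 'resid' bytes remain. */
    }
#endif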