Statistics
| Branch: | Revision:

root / dma-helpers.c @ d3231181

History | View | Annotate | Download (5.1 kB)

1 244ab90e aliguori
/*
2 244ab90e aliguori
 * DMA helper functions
3 244ab90e aliguori
 *
4 244ab90e aliguori
 * Copyright (c) 2009 Red Hat
5 244ab90e aliguori
 *
6 244ab90e aliguori
 * This work is licensed under the terms of the GNU General Public License
7 244ab90e aliguori
 * (GNU GPL), version 2 or later.
8 244ab90e aliguori
 */
9 244ab90e aliguori
10 244ab90e aliguori
#include "dma.h"
11 59a703eb aliguori
#include "block_int.h"
12 244ab90e aliguori
13 244ab90e aliguori
/*
 * Prepare a scatter/gather list for use.
 *
 * The entry array is pre-sized to @alloc_hint elements; qemu_sglist_add()
 * grows it on demand, so the hint only avoids early reallocations.
 */
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->nsg = 0;
    qsg->size = 0;
    qsg->nalloc = alloc_hint;
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
}
20 244ab90e aliguori
21 d3231181 David Gibson
/*
 * Append one [base, base + len) region to the scatter/gather list,
 * growing the backing array geometrically when it is full.
 */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    ScatterGatherEntry *entry;

    if (qsg->nsg == qsg->nalloc) {
        /* 2n+1 doubles the array and also handles nalloc == 0 */
        qsg->nalloc = qsg->nalloc * 2 + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }

    entry = &qsg->sg[qsg->nsg];
    entry->base = base;
    entry->len = len;
    qsg->size += len;
    qsg->nsg++;
}
32 244ab90e aliguori
33 244ab90e aliguori
/* Release the entry array allocated by qemu_sglist_init()/add(). */
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}
37 244ab90e aliguori
38 59a703eb aliguori
/*
 * Per-request state for a scatter/gather DMA transfer.
 *
 * Allocated from dma_aio_pool; dma_aio_cancel() recovers this struct
 * from the embedded 'common' AIOCB via container_of.
 */
typedef struct {
    BlockDriverAIOCB common;    /* AIOCB handed back to the caller */
    BlockDriverState *bs;       /* block device the transfer targets */
    BlockDriverAIOCB *acb;      /* in-flight sub-request, NULL between chunks */
    QEMUSGList *sg;             /* caller's scatter/gather list */
    uint64_t sector_num;        /* next sector; advanced after each chunk */
    bool to_dev;                /* true: guest memory is the data source */
    bool in_cancel;             /* set while bdrv_aio_cancel() is running;
                                 * stops dma_complete() releasing the AIOCB */
    int sg_cur_index;           /* resume position: current sg entry ... */
    dma_addr_t sg_cur_byte;     /* ... and byte offset within it */
    QEMUIOVector iov;           /* host iovec of currently mapped regions */
    QEMUBH *bh;                 /* retry BH after a failed memory map */
    DMAIOFunc *io_func;         /* bdrv_aio_readv or bdrv_aio_writev */
} DMAAIOCB;
52 59a703eb aliguori
53 59a703eb aliguori
static void dma_bdrv_cb(void *opaque, int ret);
54 59a703eb aliguori
55 59a703eb aliguori
static void reschedule_dma(void *opaque)
56 59a703eb aliguori
{
57 37b7842c aliguori
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
58 59a703eb aliguori
59 59a703eb aliguori
    qemu_bh_delete(dbs->bh);
60 59a703eb aliguori
    dbs->bh = NULL;
61 c3adb5b9 Paolo Bonzini
    dma_bdrv_cb(dbs, 0);
62 59a703eb aliguori
}
63 59a703eb aliguori
64 59a703eb aliguori
static void continue_after_map_failure(void *opaque)
65 59a703eb aliguori
{
66 37b7842c aliguori
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
67 59a703eb aliguori
68 59a703eb aliguori
    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
69 59a703eb aliguori
    qemu_bh_schedule(dbs->bh);
70 59a703eb aliguori
}
71 59a703eb aliguori
72 7403b14e aliguori
/*
 * Undo every cpu_physical_memory_map() made for the current chunk and
 * empty the I/O vector for the next iteration.  The is_write argument
 * (!to_dev) mirrors the direction the region was mapped with.
 */
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int idx;

    for (idx = 0; idx < dbs->iov.niov; idx++) {
        cpu_physical_memory_unmap(dbs->iov.iov[idx].iov_base,
                                  dbs->iov.iov[idx].iov_len, !dbs->to_dev,
                                  dbs->iov.iov[idx].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}
83 c3adb5b9 Paolo Bonzini
84 c3adb5b9 Paolo Bonzini
/*
 * Finish the request: unmap guest memory, invoke the caller's completion
 * callback with @ret, and release the AIOCB unless a cancel is in flight.
 */
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    dma_bdrv_unmap(dbs);
    /* cb may have been cleared by dma_aio_cancel() to suppress it. */
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}
102 7403b14e aliguori
103 856ae5c3 blueswir1
static void dma_bdrv_cb(void *opaque, int ret)
104 7403b14e aliguori
{
105 7403b14e aliguori
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
106 c227f099 Anthony Liguori
    target_phys_addr_t cur_addr, cur_len;
107 7403b14e aliguori
    void *mem;
108 7403b14e aliguori
109 7403b14e aliguori
    dbs->acb = NULL;
110 7403b14e aliguori
    dbs->sector_num += dbs->iov.size / 512;
111 7403b14e aliguori
    dma_bdrv_unmap(dbs);
112 59a703eb aliguori
113 59a703eb aliguori
    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
114 c3adb5b9 Paolo Bonzini
        dma_complete(dbs, ret);
115 59a703eb aliguori
        return;
116 59a703eb aliguori
    }
117 59a703eb aliguori
118 59a703eb aliguori
    while (dbs->sg_cur_index < dbs->sg->nsg) {
119 59a703eb aliguori
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
120 59a703eb aliguori
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
121 bbca72c6 Paolo Bonzini
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->to_dev);
122 59a703eb aliguori
        if (!mem)
123 59a703eb aliguori
            break;
124 59a703eb aliguori
        qemu_iovec_add(&dbs->iov, mem, cur_len);
125 59a703eb aliguori
        dbs->sg_cur_byte += cur_len;
126 59a703eb aliguori
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
127 59a703eb aliguori
            dbs->sg_cur_byte = 0;
128 59a703eb aliguori
            ++dbs->sg_cur_index;
129 59a703eb aliguori
        }
130 59a703eb aliguori
    }
131 59a703eb aliguori
132 59a703eb aliguori
    if (dbs->iov.size == 0) {
133 59a703eb aliguori
        cpu_register_map_client(dbs, continue_after_map_failure);
134 59a703eb aliguori
        return;
135 59a703eb aliguori
    }
136 59a703eb aliguori
137 cb144ccb Christoph Hellwig
    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
138 cb144ccb Christoph Hellwig
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
139 7403b14e aliguori
    if (!dbs->acb) {
140 c3adb5b9 Paolo Bonzini
        dma_complete(dbs, -EIO);
141 7403b14e aliguori
    }
142 59a703eb aliguori
}
143 59a703eb aliguori
144 c16b5a2c Christoph Hellwig
/*
 * AIOPool cancel hook.  Cancels the in-flight block-layer sub-request
 * (if any), suppresses the guest completion callback, and tears the
 * AIOCB down through the normal completion path.
 */
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        /* Take a local copy: cancellation can re-enter dma_bdrv_cb(),
         * which clears dbs->acb.  Renamed from 'acb', which shadowed
         * the function parameter.  */
        BlockDriverAIOCB *sub_acb = dbs->acb;
        dbs->acb = NULL;
        /* in_cancel keeps dma_complete() from releasing the AIOCB while
         * we still reference it here.  */
        dbs->in_cancel = true;
        bdrv_aio_cancel(sub_acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}
158 c16b5a2c Christoph Hellwig
159 c16b5a2c Christoph Hellwig
/* AIOCB allocation pool; 'cancel' is the hook invoked on bdrv_aio_cancel. */
static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};
163 c16b5a2c Christoph Hellwig
164 cb144ccb Christoph Hellwig
BlockDriverAIOCB *dma_bdrv_io(
165 59a703eb aliguori
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
166 cb144ccb Christoph Hellwig
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
167 bbca72c6 Paolo Bonzini
    void *opaque, bool to_dev)
168 59a703eb aliguori
{
169 cb144ccb Christoph Hellwig
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);
170 59a703eb aliguori
171 37b7842c aliguori
    dbs->acb = NULL;
172 59a703eb aliguori
    dbs->bs = bs;
173 59a703eb aliguori
    dbs->sg = sg;
174 59a703eb aliguori
    dbs->sector_num = sector_num;
175 59a703eb aliguori
    dbs->sg_cur_index = 0;
176 59a703eb aliguori
    dbs->sg_cur_byte = 0;
177 bbca72c6 Paolo Bonzini
    dbs->to_dev = to_dev;
178 cb144ccb Christoph Hellwig
    dbs->io_func = io_func;
179 59a703eb aliguori
    dbs->bh = NULL;
180 59a703eb aliguori
    qemu_iovec_init(&dbs->iov, sg->nsg);
181 59a703eb aliguori
    dma_bdrv_cb(dbs, 0);
182 37b7842c aliguori
    return &dbs->common;
183 59a703eb aliguori
}
184 59a703eb aliguori
185 59a703eb aliguori
186 59a703eb aliguori
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
187 59a703eb aliguori
                                QEMUSGList *sg, uint64_t sector,
188 59a703eb aliguori
                                void (*cb)(void *opaque, int ret), void *opaque)
189 59a703eb aliguori
{
190 bbca72c6 Paolo Bonzini
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, false);
191 59a703eb aliguori
}
192 59a703eb aliguori
193 59a703eb aliguori
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
194 59a703eb aliguori
                                 QEMUSGList *sg, uint64_t sector,
195 59a703eb aliguori
                                 void (*cb)(void *opaque, int ret), void *opaque)
196 59a703eb aliguori
{
197 bbca72c6 Paolo Bonzini
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, true);
198 59a703eb aliguori
}