dma-helpers.c @ 14015304
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"

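/* Initialize a scatter/gather list with room for alloc_hint entries. */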
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

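/* Append a guest physical address range to the list, growing the entry
   array as needed. */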
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

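/* Free the entry array; the QEMUSGList itself is owned by the caller. */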
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}

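/* State of one scatter/gather block I/O request in flight. */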
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

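/* Bottom half handler: retry the transfer after a failed memory mapping. */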
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

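/* Called by the map-client machinery when a mapping may succeed again;
   reschedule the transfer from a bottom half. */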
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

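/* Release the guest memory mappings backing the current I/O vector. */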
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
}

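/*
 * Completion callback and main driver of the transfer: unmap the chunk
 * that just finished, then map the next run of scatter/gather entries
 * and submit another request, until the list is exhausted or an error
 * is reported.
 */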
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    /* Account for the portion completed by the previous request and
       release its guest memory mappings (a no-op on the first call). */
    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    /* Done, or the block layer reported an error: hand the result to the
       caller and clean up. */
    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    /* Map as much of the remaining scatter/gather list as possible;
       cpu_physical_memory_map() may return a shorter length than asked for. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    /* Nothing could be mapped (e.g. the bounce buffer is in use): retry
       from a bottom half once a mapping becomes available. */
    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    /* Submit the next read or write covering the mapped region. */
    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    if (!dbs->acb) {
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}

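/* Cancel the request currently submitted to the block layer, if any. */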
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        bdrv_aio_cancel(dbs->acb);
    }
}

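/* Pool from which DMAAIOCBs are allocated; provides the cancel hook. */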
static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

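/*
 * Start a scatter/gather transfer: allocate a DMAAIOCB, then let
 * dma_bdrv_cb() map the first chunk and submit it through io_func.
 * Returns NULL if no request could be submitted.
 */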
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, int is_write)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    if (!dbs->acb) {
        qemu_aio_release(dbs);
        return NULL;
    }
    return &dbs->common;
}


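/* Convenience wrapper: scatter/gather read using bdrv_aio_readv. */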
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, 0);
}

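/* Convenience wrapper: scatter/gather write using bdrv_aio_writev. */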
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, 1);
}
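
/*
 * Usage sketch (illustrative only, not part of the original file): a device
 * model that has collected guest physical addresses, e.g. from descriptor
 * tables, would typically drive these helpers roughly as follows. The state
 * struct, field and callback names below are hypothetical.
 *
 *     qemu_sglist_init(&s->sg, expected_entries);
 *     // for each guest descriptor:
 *     qemu_sglist_add(&s->sg, desc_base, desc_len);
 *
 *     s->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num, dma_done, s);
 *
 *     static void dma_done(void *opaque, int ret)
 *     {
 *         MyDeviceState *s = opaque;
 *         qemu_sglist_destroy(&s->sg);
 *         // check ret and raise the device's completion interrupt
 *     }
 */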