dma-helpers.c @ 6512a2a7

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"

static AIOPool dma_aio_pool;
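
/*
 * Scatter/gather list helpers: a QEMUSGList is a growable array of
 * (guest physical address, length) entries plus a running byte total,
 * built up by the device emulation before a transfer is submitted.
 */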
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}
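
/*
 * Per-request state for a scatter/gather block transfer.  One of these
 * lives from dma_bdrv_io() until the caller's completion callback has
 * fired (or the request has been cancelled).
 */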
typedef struct {
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;       /* AIOCB handed back to the caller */
    BlockDriverAIOCB *inner_acb; /* in-flight bdrv_aio_{readv,writev} request,
                                    kept so dma_aio_cancel() can forward a
                                    cancel to the block layer */
    void *user_opaque;           /* the caller's opaque; acb->opaque points
                                    back at this state for cancellation */
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
} DMABlockState;

static void dma_bdrv_cb(void *opaque, int ret);
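
/*
 * cpu_physical_memory_map() can fail when bounce-buffer space is
 * exhausted.  dma_bdrv_cb() then registers a map client; once space
 * frees up, the transfer is restarted from a bottom half.
 */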
static void reschedule_dma(void *opaque)
{
    DMABlockState *dbs = (DMABlockState *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMABlockState *dbs = (DMABlockState *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
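
/*
 * Completion callback, also used to (re)start the transfer: unmap the
 * chunk that just finished, then map as many remaining scatter/gather
 * entries as possible into one QEMUIOVector and submit the next
 * bdrv_aio_readv/writev.  The block layer's 512-byte sector size is
 * assumed throughout.
 */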
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMABlockState *dbs = (DMABlockState *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;
    int i;

    /* Account for and unmap the chunk completed by the previous pass. */
    dbs->sector_num += dbs->iov.size / 512;
    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);

    /* Done, or the block layer reported an error: complete the request. */
    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->acb->cb(dbs->user_opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs->acb);
        qemu_free(dbs);
        return;
    }

    /* Map as much of the remaining scatter/gather list as possible. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    /* Nothing could be mapped: wait for bounce-buffer space. */
    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    /* Keep the in-flight AIOCB so dma_aio_cancel() can forward a cancel. */
    if (dbs->is_write) {
        dbs->inner_acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                                         dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        dbs->inner_acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                                        dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
}
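
/*
 * Submit a scatter/gather transfer.  Allocates the per-request state,
 * hands an AIOCB back to the caller, and kicks the map/submit loop
 * with an initial dma_bdrv_cb(dbs, 0) call.
 */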
static BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    BlockDriverCompletionFunc *cb, void *opaque,
    int is_write)
{
    DMABlockState *dbs = qemu_malloc(sizeof(*dbs));

    dbs->bs = bs;
    /* Make the AIOCB's opaque point back at our state so that
       dma_aio_cancel() can recover it; the caller's opaque is kept in
       user_opaque and used when the completion callback is invoked. */
    dbs->acb = qemu_aio_get_pool(&dma_aio_pool, bs, cb, dbs);
    dbs->user_opaque = opaque;
    dbs->inner_acb = NULL;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return dbs->acb;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    /* acb->opaque points back at our state; see dma_bdrv_io(). */
    DMABlockState *dbs = (DMABlockState *)acb->opaque;

    /* Forward the cancel to the in-flight block-layer request, if any;
       cancelling acb itself here would only recurse into this pool. */
    if (dbs->inner_acb) {
        bdrv_aio_cancel(dbs->inner_acb);
    }
}

void dma_helper_init(void)
{
    aio_pool_init(&dma_aio_pool, sizeof(BlockDriverAIOCB), dma_aio_cancel);
}
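
/*
 * Minimal usage sketch (illustrative only; prd_base, prd_len, my_cb and
 * my_state are placeholders for a device model's own descriptor values,
 * completion handler and state):
 *
 *     QEMUSGList qsg;
 *
 *     dma_helper_init();                 // once, at startup
 *     qemu_sglist_init(&qsg, 2);
 *     qemu_sglist_add(&qsg, prd_base[0], prd_len[0]);
 *     qemu_sglist_add(&qsg, prd_base[1], prd_len[1]);
 *     dma_bdrv_read(bs, &qsg, sector, my_cb, my_state);
 *
 * my_cb(my_state, ret) fires once the whole transfer has completed; the
 * caller then releases the list with qemu_sglist_destroy(&qsg).
 */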