
include/sysemu/dma.h @ df32fd1c

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include <stdio.h>
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "hw/hw.h"
#include "block/block.h"
#include "sysemu/kvm.h"

typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    AddressSpace *as;
};

#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    if (kvm_enabled()) {
        smp_mb();
    }
}

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors */
static inline bool dma_memory_valid(AddressSpace *as,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    return address_space_access_valid(as, addr, len,
                                      dir == DMA_DIRECTION_FROM_DEVICE);
}
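
/*
 * Illustrative sketch (not part of the API): a device model might pre-check
 * a guest-supplied buffer before committing to a transfer.  'as', 'buf_addr'
 * and 'buf_len' are hypothetical names.
 *
 *     if (!dma_memory_valid(as, buf_addr, buf_len,
 *                           DMA_DIRECTION_FROM_DEVICE)) {
 *         // refuse the request instead of starting the DMA
 *     }
 */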

static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    return address_space_rw(as, addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
}

static inline int dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write_relaxed(AddressSpace *as, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}

static inline int dma_memory_rw(AddressSpace *as, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    dma_barrier(as, dir);

    return dma_memory_rw_relaxed(as, addr, buf, len, dir);
}

static inline int dma_memory_read(AddressSpace *as, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write(AddressSpace *as, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(as, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}
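
/*
 * Illustrative sketch: a device model typically pulls a descriptor out of
 * guest memory and pushes a status word back.  'MyDesc', 'desc_addr',
 * 'status_addr' and 'status' are hypothetical; the return value propagates
 * from address_space_rw(), non-zero indicating a failed access.
 *
 *     MyDesc desc;
 *     uint32_t status = cpu_to_le32(1);
 *
 *     if (dma_memory_read(as, desc_addr, &desc, sizeof(desc))) {
 *         // access failed (e.g. unassigned memory); report an error
 *     }
 *     dma_memory_write(as, status_addr, &status, sizeof(status));
 */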

int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len);

static inline void *dma_memory_map(AddressSpace *as,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir)
{
    hwaddr xlen = *len;
    void *p;

    p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE);
    *len = xlen;
    return p;
}

static inline void dma_memory_unmap(AddressSpace *as,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    address_space_unmap(as, buffer, (hwaddr)len,
                        dir == DMA_DIRECTION_FROM_DEVICE, access_len);
}
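
/*
 * Illustrative sketch: mapping yields direct access to guest memory where
 * possible (a bounce buffer otherwise), and the mapped length may come back
 * smaller than requested, so callers should be ready to loop or fall back
 * to dma_memory_rw().  'addr' and 'len' are hypothetical.
 *
 *     dma_addr_t mlen = len;
 *     void *p = dma_memory_map(as, addr, &mlen, DMA_DIRECTION_FROM_DEVICE);
 *
 *     if (p) {
 *         memset(p, 0, mlen);        // device fills (part of) the region
 *         dma_memory_unmap(as, p, mlen, DMA_DIRECTION_FROM_DEVICE, mlen);
 *     }
 */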

#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(as, addr, &val, (_bits) / 8);                   \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(AddressSpace *as,      \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(as, addr, &val, (_bits) / 8);                  \
    }

static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
{
    uint8_t val;

    dma_memory_read(as, addr, &val, 1);
    return val;
}

static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(as, addr, &val, 1);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
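
/*
 * The macro above expands to fixed-endianness accessors: lduw_le_dma(),
 * ldl_le_dma(), ldq_le_dma(), stw_le_dma(), stl_le_dma(), stq_le_dma() and
 * their _be_ counterparts.  A sketch of typical use ('ring_base' and the
 * offsets are hypothetical):
 *
 *     uint32_t head = ldl_le_dma(as, ring_base + HEAD_OFFSET);
 *     stw_le_dma(as, ring_base + FLAGS_OFFSET, 0);
 */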

struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, AddressSpace *as);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
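
/*
 * Typical construction of a scatter-gather list (sketch; 'n_entries',
 * 'elem_addr' and 'elem_len' are hypothetical, e.g. taken from a
 * guest-provided descriptor table):
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, n_entries, as);
 *     qemu_sglist_add(&qsg, elem_addr, elem_len);   // once per element
 *     ...                                           // use with dma_bdrv_read/write
 *     qemu_sglist_destroy(&qsg);
 */
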
#endif

typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *iov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque);

BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);
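
/*
 * Illustrative sketch of an asynchronous scatter-gather block read into a
 * QEMUSGList such as the one built above; 'my_dma_complete', 'bs',
 * 'sector_num' and 's' are hypothetical:
 *
 *     static void my_dma_complete(void *opaque, int ret)
 *     {
 *         // ret < 0 signals an I/O error
 *     }
 *
 *     dma_bdrv_read(bs, &qsg, sector_num, my_dma_complete, s);
 */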

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

#endif