Statistics
| Branch: | Revision:

root / include / sysemu / dma.h @ dccfcd0e

History | View | Annotate | Download (9.5 kB)

1 244ab90e aliguori
/*
2 244ab90e aliguori
 * DMA helper functions
3 244ab90e aliguori
 *
4 244ab90e aliguori
 * Copyright (c) 2009 Red Hat
5 244ab90e aliguori
 *
6 244ab90e aliguori
 * This work is licensed under the terms of the GNU General Public License
7 244ab90e aliguori
 * (GNU GPL), version 2 or later.
8 244ab90e aliguori
 */
9 244ab90e aliguori
10 244ab90e aliguori
#ifndef DMA_H
11 244ab90e aliguori
#define DMA_H
12 244ab90e aliguori
13 244ab90e aliguori
#include <stdio.h>
14 022c62cb Paolo Bonzini
#include "exec/memory.h"
15 1ad2134f Paul Brook
#include "hw/hw.h"
16 737e150e Paolo Bonzini
#include "block/block.h"
17 9c17d615 Paolo Bonzini
#include "sysemu/kvm.h"
18 244ab90e aliguori
19 d86a77f8 David Gibson
typedef struct DMAContext DMAContext;
20 10dc8aef Paolo Bonzini
typedef struct ScatterGatherEntry ScatterGatherEntry;
21 10dc8aef Paolo Bonzini
22 43cf8ae6 David Gibson
/* Direction of a DMA transfer, named from the device's point of view:
 * TO_DEVICE moves data from guest memory to the device (used by
 * dma_memory_read()), FROM_DEVICE moves data from the device into
 * guest memory (used by dma_memory_write()). */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
26 43cf8ae6 David Gibson
27 fead0c24 Paolo Bonzini
/* A scatter/gather list of bus-address ranges for one DMA transfer. */
struct QEMUSGList {
    ScatterGatherEntry *sg;  /* array of entries */
    int nsg;                 /* number of entries in use */
    int nalloc;              /* number of entries allocated in sg[] */
    size_t size;             /* total size in bytes — presumably the sum of
                              * entry lengths; confirm in qemu_sglist_add() */
    DMAContext *dma;         /* DMA context the addresses belong to */
};
34 fead0c24 Paolo Bonzini
35 4be403c8 Avi Kivity
#ifndef CONFIG_USER_ONLY
36 d9d1055e David Gibson
37 e5332e63 David Gibson
/*
38 e5332e63 David Gibson
 * When an IOMMU is present, bus addresses become distinct from
39 e5332e63 David Gibson
 * CPU/memory physical addresses and may be a different size.  Because
40 e5332e63 David Gibson
 * the IOVA size depends more on the bus than on the platform, we more
41 e5332e63 David Gibson
 * or less have to treat these as 64-bit always to cover all (or at
42 e5332e63 David Gibson
 * least most) cases.
43 e5332e63 David Gibson
 */
44 e5332e63 David Gibson
typedef uint64_t dma_addr_t;
45 e5332e63 David Gibson
46 e5332e63 David Gibson
#define DMA_ADDR_BITS 64
47 e5332e63 David Gibson
#define DMA_ADDR_FMT "%" PRIx64
48 e5332e63 David Gibson
49 e5332e63 David Gibson
/* Translate a bus address to a CPU physical address (*paddr) and the
 * length of the contiguous translated region (*len).
 * NOTE(review): returns int, presumably 0 on success — confirm the
 * error convention against the implementations. */
typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             hwaddr *paddr,
                             hwaddr *len,
                             DMADirection dir);
/* Map a bus-address range for direct host access; *len is in/out
 * (presumably shortened to the contiguous mappable length — confirm). */
typedef void* DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
/* Release a mapping produced by the map hook; access_len is the number
 * of bytes actually accessed through the mapping. */
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);
63 e5332e63 David Gibson
64 e5332e63 David Gibson
/* The view of memory seen by a bus master: the AddressSpace accessed
 * plus optional IOMMU hooks.  A context with a NULL translate hook is
 * treated as having no IOMMU (see dma_has_iommu()). */
struct DMAContext {
    AddressSpace *as;            /* target address space for DMA accesses */
    DMATranslateFunc *translate; /* IOMMU translation hook, NULL if none */
    DMAMapFunc *map;             /* IOMMU map hook */
    DMAUnmapFunc *unmap;         /* IOMMU unmap hook */
};
70 e5332e63 David Gibson
71 9e11908f Peter Maydell
/* A global DMA context corresponding to the address_space_memory
72 9e11908f Peter Maydell
 * AddressSpace, for sysbus devices which do DMA.
73 9e11908f Peter Maydell
 */
74 9e11908f Peter Maydell
extern DMAContext dma_context_memory;
75 9e11908f Peter Maydell
76 7a0bac4d Benjamin Herrenschmidt
static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * Issued before every DMA read and write (except the _relaxed
     * variants) to give some sane ordering of device accesses with
     * respect to concurrently running VCPUs.
     *
     * Callers of map(), unmap() or the low-level st/ld_* accessors
     * must supply their own barriers instead.
     *
     * This primitive implementation simply does a full smp_mb()
     * before each operation; a smarter one could choose lighter
     * barriers based on the transfer direction, the DMA context, etc.
     */
    if (!kvm_enabled()) {
        return;
    }
    smp_mb();
}
100 7a0bac4d Benjamin Herrenschmidt
101 e5332e63 David Gibson
static inline bool dma_has_iommu(DMAContext *dma)
{
    /* An IOMMU is present exactly when the context has a translate hook. */
    return dma != NULL && dma->translate != NULL;
}
105 d9d1055e David Gibson
106 d86a77f8 David Gibson
/* Checks that the given range of bus addresses is valid for DMA when
 * an IOMMU is present.  This is useful for certain cases, but usually
 * you should just use dma_memory_{read,write}() and check for errors. */
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir);
111 e5332e63 David Gibson
static inline bool dma_memory_valid(DMAContext *dma,
112 e5332e63 David Gibson
                                    dma_addr_t addr, dma_addr_t len,
113 e5332e63 David Gibson
                                    DMADirection dir)
114 d86a77f8 David Gibson
{
115 e5332e63 David Gibson
    if (!dma_has_iommu(dma)) {
116 e5332e63 David Gibson
        return true;
117 e5332e63 David Gibson
    } else {
118 e5332e63 David Gibson
        return iommu_dma_memory_valid(dma, addr, len, dir);
119 e5332e63 David Gibson
    }
120 d86a77f8 David Gibson
}
121 d86a77f8 David Gibson
122 e5332e63 David Gibson
/* IOMMU-translated bulk read/write.  Presumably returns 0 on success
 * (the non-IOMMU fast path in dma_memory_rw_relaxed() returns 0) —
 * confirm the error convention in the implementation. */
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir);
124 7a0bac4d Benjamin Herrenschmidt
static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
125 7a0bac4d Benjamin Herrenschmidt
                                        void *buf, dma_addr_t len,
126 7a0bac4d Benjamin Herrenschmidt
                                        DMADirection dir)
127 d86a77f8 David Gibson
{
128 e5332e63 David Gibson
    if (!dma_has_iommu(dma)) {
129 e5332e63 David Gibson
        /* Fast-path for no IOMMU */
130 b90600ee Avi Kivity
        address_space_rw(dma->as, addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
131 e5332e63 David Gibson
        return 0;
132 e5332e63 David Gibson
    } else {
133 e5332e63 David Gibson
        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
134 e5332e63 David Gibson
    }
135 d86a77f8 David Gibson
}
136 d86a77f8 David Gibson
137 7a0bac4d Benjamin Herrenschmidt
static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    /* Barrier-less read of guest memory into buf. */
    return dma_memory_rw_relaxed(dma, addr, buf, len,
                                 DMA_DIRECTION_TO_DEVICE);
}
142 7a0bac4d Benjamin Herrenschmidt
143 7a0bac4d Benjamin Herrenschmidt
static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    /* Barrier-less write of buf into guest memory; const is dropped
     * only to fit the shared rw entry point, which never writes to
     * buf in the FROM_DEVICE direction. */
    void *p = (void *)buf;

    return dma_memory_rw_relaxed(dma, addr, p, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}
149 7a0bac4d Benjamin Herrenschmidt
150 7a0bac4d Benjamin Herrenschmidt
static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
151 7a0bac4d Benjamin Herrenschmidt
                                void *buf, dma_addr_t len,
152 7a0bac4d Benjamin Herrenschmidt
                                DMADirection dir)
153 7a0bac4d Benjamin Herrenschmidt
{
154 7a0bac4d Benjamin Herrenschmidt
    dma_barrier(dma, dir);
155 7a0bac4d Benjamin Herrenschmidt
156 7a0bac4d Benjamin Herrenschmidt
    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
157 7a0bac4d Benjamin Herrenschmidt
}
158 7a0bac4d Benjamin Herrenschmidt
159 d86a77f8 David Gibson
static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    /* Ordered read of guest memory into buf; see dma_memory_rw(). */
    return dma_memory_rw(dma, addr, buf, len,
                         DMA_DIRECTION_TO_DEVICE);
}
164 d86a77f8 David Gibson
165 d86a77f8 David Gibson
static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    /* Ordered write of buf into guest memory; see dma_memory_rw().
     * The const cast mirrors dma_memory_write_relaxed(). */
    void *p = (void *)buf;

    return dma_memory_rw(dma, addr, p, len, DMA_DIRECTION_FROM_DEVICE);
}
171 d86a77f8 David Gibson
172 e5332e63 David Gibson
/* Fill @len bytes of guest memory at bus address @addr with byte @c:
 * the IOMMU-aware variant and the generic entry point. */
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);

/* Map a bus-address range through the IOMMU for direct host access.
 * *len is in/out — presumably shortened to the contiguous mappable
 * length, matching dma_memory_map(); confirm in the implementation. */
void *iommu_dma_memory_map(DMAContext *dma,
                           dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir);
180 d86a77f8 David Gibson
static inline void *dma_memory_map(DMAContext *dma,
181 d86a77f8 David Gibson
                                   dma_addr_t addr, dma_addr_t *len,
182 d86a77f8 David Gibson
                                   DMADirection dir)
183 d86a77f8 David Gibson
{
184 e5332e63 David Gibson
    if (!dma_has_iommu(dma)) {
185 a8170e5e Avi Kivity
        hwaddr xlen = *len;
186 e5332e63 David Gibson
        void *p;
187 e5332e63 David Gibson
188 b90600ee Avi Kivity
        p = address_space_map(dma->as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE);
189 e5332e63 David Gibson
        *len = xlen;
190 e5332e63 David Gibson
        return p;
191 e5332e63 David Gibson
    } else {
192 e5332e63 David Gibson
        return iommu_dma_memory_map(dma, addr, len, dir);
193 e5332e63 David Gibson
    }
194 d86a77f8 David Gibson
}
195 d86a77f8 David Gibson
196 e5332e63 David Gibson
/* Release a mapping obtained from iommu_dma_memory_map();
 * @access_len is the number of bytes actually accessed through it. */
void iommu_dma_memory_unmap(DMAContext *dma,
                            void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len);
199 d86a77f8 David Gibson
static inline void dma_memory_unmap(DMAContext *dma,
200 d86a77f8 David Gibson
                                    void *buffer, dma_addr_t len,
201 d86a77f8 David Gibson
                                    DMADirection dir, dma_addr_t access_len)
202 d86a77f8 David Gibson
{
203 e5332e63 David Gibson
    if (!dma_has_iommu(dma)) {
204 a8170e5e Avi Kivity
        address_space_unmap(dma->as, buffer, (hwaddr)len,
205 b90600ee Avi Kivity
                            dir == DMA_DIRECTION_FROM_DEVICE, access_len);
206 e5332e63 David Gibson
    } else {
207 e5332e63 David Gibson
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
208 e5332e63 David Gibson
    }
209 d86a77f8 David Gibson
}
210 d86a77f8 David Gibson
211 d86a77f8 David Gibson
/*
 * Generate a pair of fixed-endian DMA accessors:
 *   ld<_lname>_<_end>_dma()  reads a _bits-wide value and converts it
 *                            from _end (le/be) byte order to host order;
 *   st<_sname>_<_end>_dma()  converts to _end byte order and writes it.
 * Errors from dma_memory_read()/dma_memory_write() are ignored here.
 */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(dma, addr, &val, (_bits) / 8);                  \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma,       \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(dma, addr, &val, (_bits) / 8);                 \
    }
226 d86a77f8 David Gibson
227 d86a77f8 David Gibson
static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
{
    /* Read one byte from the given bus address (no endianness issue). */
    uint8_t b;

    dma_memory_read(dma, addr, &b, sizeof(b));
    return b;
}
234 d86a77f8 David Gibson
235 d86a77f8 David Gibson
static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
{
    /* Store one byte at the given bus address (no endianness issue). */
    uint8_t b = val;

    dma_memory_write(dma, addr, &b, sizeof(b));
}
239 d86a77f8 David Gibson
240 d86a77f8 David Gibson
/* Instantiate the 16/32/64-bit accessors in both byte orders. */
DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
248 d86a77f8 David Gibson
249 b90600ee Avi Kivity
/* Initialize @dma to target @as; the IOMMU hooks may be NULL for a
 * direct (non-IOMMU) context — see dma_has_iommu(). */
void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap);
251 e5332e63 David Gibson
252 10dc8aef Paolo Bonzini
/* One contiguous bus-address range within a QEMUSGList. */
struct ScatterGatherEntry {
    dma_addr_t base;    /* starting bus address of the region */
    dma_addr_t len;     /* length of the region in bytes */
};
256 244ab90e aliguori
257 c65bcef3 David Gibson
/* Create, extend and free a scatter/gather list.  alloc_hint presumably
 * sizes the initial entry allocation (cf. the nalloc field) — confirm
 * in the implementation. */
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
260 10dc8aef Paolo Bonzini
#endif
261 244ab90e aliguori
262 cb144ccb Christoph Hellwig
/* Signature of the block-layer AIO function driven by dma_bdrv_io(). */
typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *iov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque);

/* Start an asynchronous block transfer between @bs and the
 * scatter/gather list @sg, invoking @cb on completion. */
BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
/* Read/write convenience entry points — presumably wrappers around
 * dma_bdrv_io(); confirm in the implementation. */
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
/* Copy up to @len bytes between the bounce buffer @ptr and the
 * scatter/gather list @sg.  NOTE(review): the uint64_t return is
 * presumably the byte count transferred — confirm against callers. */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

/* Begin block-I/O accounting for the transfer described by @sg. */
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);
281 84a69356 Paolo Bonzini
282 244ab90e aliguori
#endif