dma.h @ 0834c9ea

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include <stdio.h>
#include "hw/hw.h"
#include "block.h"
#include "kvm.h"

typedef struct DMAContext DMAContext;
typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
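
/*
 * Directions are named from the device's point of view:
 * DMA_DIRECTION_TO_DEVICE moves data from guest memory to the device
 * (this is what dma_memory_read() below uses), while
 * DMA_DIRECTION_FROM_DEVICE moves data from the device into guest
 * memory (used by dma_memory_write()).
 */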

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    DMAContext *dma;
};

#if defined(TARGET_PHYS_ADDR_BITS)

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             target_phys_addr_t *paddr,
                             target_phys_addr_t *len,
                             DMADirection dir);
typedef void *DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);

struct DMAContext {
    DMATranslateFunc *translate;
    DMAMapFunc *map;
    DMAUnmapFunc *unmap;
};

static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations, unless
     * the _relaxed form is used, and is responsible for providing
     * some sane ordering of accesses with respect to concurrently
     * running VCPUs.
     *
     * Users of map(), unmap() or the lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation, which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc.
     */
    if (kvm_enabled()) {
        smp_mb();
    }
}

static inline bool dma_has_iommu(DMAContext *dma)
{
    return !!dma;
}

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors. */
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir);
static inline bool dma_memory_valid(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        return true;
    } else {
        return iommu_dma_memory_valid(dma, addr, len, dir);
    }
}

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir);
static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* Fast-path for no IOMMU */
        cpu_physical_memory_rw(addr, buf, len,
                               dir == DMA_DIRECTION_FROM_DEVICE);
        return 0;
    } else {
        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
    }
}

static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}

static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    dma_barrier(dma, dir);

    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
}

static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}
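
/*
 * Typical use (a sketch, not code from this file): a device fetches a
 * descriptor from guest memory and writes back a status byte.
 * "MyDesc", "dev" and the addresses are hypothetical; a non-zero
 * return from the helpers indicates the access failed (e.g. an
 * invalid IOMMU translation).
 *
 *     MyDesc desc;
 *     uint8_t status = 1;
 *
 *     if (dma_memory_read(dev->dma, dev->desc_addr, &desc, sizeof(desc))) {
 *         return;    (bad descriptor address)
 *     }
 *     dma_memory_write(dev->dma, dev->status_addr, &status, sizeof(status));
 */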

int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);

void *iommu_dma_memory_map(DMAContext *dma,
                           dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir);
static inline void *dma_memory_map(DMAContext *dma,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        target_phys_addr_t xlen = *len;
        void *p;

        p = cpu_physical_memory_map(addr, &xlen,
                                    dir == DMA_DIRECTION_FROM_DEVICE);
        *len = xlen;
        return p;
    } else {
        return iommu_dma_memory_map(dma, addr, len, dir);
    }
}

void iommu_dma_memory_unmap(DMAContext *dma,
                            void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len);
static inline void dma_memory_unmap(DMAContext *dma,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    if (!dma_has_iommu(dma)) {
        cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
                                  dir == DMA_DIRECTION_FROM_DEVICE,
                                  access_len);
    } else {
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
    }
}
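
/*
 * Typical map/unmap use (sketch): the map helpers may shorten *len or
 * return NULL, so callers must be ready to fall back to the
 * dma_memory_rw() path, and must pass the number of bytes actually
 * touched as access_len when unmapping.  As the comment in
 * dma_barrier() notes, map()/unmap() users order their own accesses.
 *
 *     dma_addr_t plen = len;
 *     void *mem;
 *
 *     mem = dma_memory_map(dma, addr, &plen, DMA_DIRECTION_FROM_DEVICE);
 *     if (!mem || plen < len) {
 *         ... fall back to dma_memory_write() of a local buffer ...
 *     }
 *     ... device fills mem[0..plen) ...
 *     dma_memory_unmap(dma, mem, plen, DMA_DIRECTION_FROM_DEVICE, plen);
 */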

#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(dma, addr, &val, (_bits) / 8);                  \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma,       \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(dma, addr, &val, (_bits) / 8);                 \
    }

static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
{
    uint8_t val;

    dma_memory_read(dma, addr, &val, 1);
    return val;
}

static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(dma, addr, &val, 1);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
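
/*
 * The macro above expands to lduw_le_dma()/stw_le_dma(),
 * ldl_le_dma()/stl_le_dma(), ldq_le_dma()/stq_le_dma() and their _be
 * counterparts.  Sketch of reading and updating 32-bit little-endian
 * ring pointers in guest memory (the ring layout is hypothetical):
 *
 *     uint32_t head = ldl_le_dma(dev->dma, ring_base + HEAD_OFFSET);
 *     stl_le_dma(dev->dma, ring_base + TAIL_OFFSET, tail);
 */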

void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap);
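
/*
 * A bus that has an IOMMU wires one up roughly like this (sketch;
 * my_translate, my_map and my_unmap are hypothetical callbacks
 * matching the typedefs above, and devices behind the IOMMU are then
 * handed a pointer to this context):
 *
 *     static DMAContext iommu_dma;
 *
 *     dma_context_init(&iommu_dma, my_translate, my_map, my_unmap);
 */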

struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
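
/*
 * Building a scatter-gather list (sketch): collect the guest segments
 * of a transfer, hand the list to the dma_bdrv_* helpers below, and
 * destroy it once the request completes.  The segment names are
 * hypothetical.
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, 2, dma);    (hint: expect ~2 entries)
 *     qemu_sglist_add(&qsg, seg0_base, seg0_len);
 *     qemu_sglist_add(&qsg, seg1_base, seg1_len);
 *     ...
 *     qemu_sglist_destroy(&qsg);
 */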

#endif

typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *iov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque);

BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
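
/*
 * Starting an asynchronous scatter-gather disk read (sketch;
 * "my_complete" is a hypothetical BlockDriverCompletionFunc, which
 * receives the usual (opaque, ret) pair and is the right place to
 * destroy the QEMUSGList built above):
 *
 *     BlockDriverAIOCB *aiocb;
 *
 *     aiocb = dma_bdrv_read(bs, &qsg, sector, my_complete, dev);
 */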

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

#endif