Statistics
| Branch: | Revision:

root / dma.h @ e5332e63

History | View | Annotate | Download (7.9 kB)

1 244ab90e aliguori
/*
2 244ab90e aliguori
 * DMA helper functions
3 244ab90e aliguori
 *
4 244ab90e aliguori
 * Copyright (c) 2009 Red Hat
5 244ab90e aliguori
 *
6 244ab90e aliguori
 * This work is licensed under the terms of the GNU General Public License
7 244ab90e aliguori
 * (GNU GPL), version 2 or later.
8 244ab90e aliguori
 */
9 244ab90e aliguori
10 244ab90e aliguori
#ifndef DMA_H
11 244ab90e aliguori
#define DMA_H
12 244ab90e aliguori
13 244ab90e aliguori
#include <stdio.h>
14 1ad2134f Paul Brook
#include "hw/hw.h"
15 59a703eb aliguori
#include "block.h"
16 244ab90e aliguori
17 d86a77f8 David Gibson
typedef struct DMAContext DMAContext;
18 10dc8aef Paolo Bonzini
typedef struct ScatterGatherEntry ScatterGatherEntry;
19 10dc8aef Paolo Bonzini
20 43cf8ae6 David Gibson
/*
 * Direction of a DMA transfer, from the device's point of view:
 * TO_DEVICE reads guest memory into the device (see dma_memory_read()),
 * FROM_DEVICE writes device data into guest memory (see dma_memory_write()).
 */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
24 43cf8ae6 David Gibson
25 fead0c24 Paolo Bonzini
/* A scatter/gather list of DMA regions, built via qemu_sglist_add(). */
struct QEMUSGList {
    ScatterGatherEntry *sg;     /* array of (base, len) entries */
    int nsg;                    /* entries currently in use */
    int nalloc;                 /* entries allocated in sg */
    size_t size;                /* total byte length of all entries */
    DMAContext *dma;            /* DMA context; NULL means no IOMMU */
};
32 fead0c24 Paolo Bonzini
33 10dc8aef Paolo Bonzini
#if defined(TARGET_PHYS_ADDR_BITS)
34 d9d1055e David Gibson
35 e5332e63 David Gibson
/*
36 e5332e63 David Gibson
 * When an IOMMU is present, bus addresses become distinct from
37 e5332e63 David Gibson
 * CPU/memory physical addresses and may be a different size.  Because
38 e5332e63 David Gibson
 * the IOVA size depends more on the bus than on the platform, we more
39 e5332e63 David Gibson
 * or less have to treat these as 64-bit always to cover all (or at
40 e5332e63 David Gibson
 * least most) cases.
41 e5332e63 David Gibson
 */
42 e5332e63 David Gibson
typedef uint64_t dma_addr_t;
43 e5332e63 David Gibson
44 e5332e63 David Gibson
#define DMA_ADDR_BITS 64
45 e5332e63 David Gibson
#define DMA_ADDR_FMT "%" PRIx64
46 e5332e63 David Gibson
47 e5332e63 David Gibson
/*
 * IOMMU callback: translate bus address @addr for a transfer in
 * direction @dir into a CPU physical address (*paddr) and the length
 * (*len) for which that translation is contiguous.  Returns an int
 * status (presumably 0 on success -- confirm against implementations).
 */
typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             target_phys_addr_t *paddr,
                             target_phys_addr_t *len,
                             DMADirection dir);
/*
 * IOMMU callback: map [addr, addr + *len) for direct host access,
 * returning a host pointer; *len may be reduced to the mappable size
 * (mirrors cpu_physical_memory_map(), used on the no-IOMMU path of
 * dma_memory_map()).
 */
typedef void* DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
/*
 * IOMMU callback: release a mapping obtained from DMAMapFunc.
 * @access_len is the number of bytes actually accessed through it.
 */
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);
61 e5332e63 David Gibson
62 e5332e63 David Gibson
/*
 * Per-bus IOMMU operation table.  A NULL DMAContext pointer means
 * "no IOMMU" (see dma_has_iommu()); the dma_memory_*() helpers then
 * fall through to the cpu_physical_memory_*() fast paths.
 */
struct DMAContext {
    DMATranslateFunc *translate;    /* address translation */
    DMAMapFunc *map;                /* direct-access mapping */
    DMAUnmapFunc *unmap;            /* release a mapping */
};
67 e5332e63 David Gibson
68 e5332e63 David Gibson
/* Return true if @dma routes accesses through an IOMMU (non-NULL context). */
static inline bool dma_has_iommu(DMAContext *dma)
{
    return dma != NULL;
}
72 d9d1055e David Gibson
73 d86a77f8 David Gibson
/* Checks that the given range of addresses is valid for DMA.  This is
74 d86a77f8 David Gibson
 * useful for certain cases, but usually you should just use
75 d86a77f8 David Gibson
 * dma_memory_{read,write}() and check for errors */
76 e5332e63 David Gibson
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
77 e5332e63 David Gibson
                            DMADirection dir);
78 e5332e63 David Gibson
static inline bool dma_memory_valid(DMAContext *dma,
79 e5332e63 David Gibson
                                    dma_addr_t addr, dma_addr_t len,
80 e5332e63 David Gibson
                                    DMADirection dir)
81 d86a77f8 David Gibson
{
82 e5332e63 David Gibson
    if (!dma_has_iommu(dma)) {
83 e5332e63 David Gibson
        return true;
84 e5332e63 David Gibson
    } else {
85 e5332e63 David Gibson
        return iommu_dma_memory_valid(dma, addr, len, dir);
86 e5332e63 David Gibson
    }
87 d86a77f8 David Gibson
}
88 d86a77f8 David Gibson
89 e5332e63 David Gibson
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
90 e5332e63 David Gibson
                        void *buf, dma_addr_t len, DMADirection dir);
91 d86a77f8 David Gibson
/*
 * Transfer @len bytes between @buf and guest address @addr in
 * direction @dir.  Returns 0 on the no-IOMMU fast path; otherwise
 * whatever iommu_dma_memory_rw() reports.
 */
static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len, DMADirection dir)
{
    if (dma_has_iommu(dma)) {
        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
    }

    /* Fast-path for no IOMMU */
    cpu_physical_memory_rw(addr, buf, len,
                           dir == DMA_DIRECTION_FROM_DEVICE);
    return 0;
}
103 d86a77f8 David Gibson
104 d86a77f8 David Gibson
/*
 * Read @len bytes of guest memory at @addr into @buf (a transfer
 * towards the device).  Returns dma_memory_rw()'s status: 0 in the
 * no-IOMMU case, otherwise the IOMMU path's result.
 */
static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}
109 d86a77f8 David Gibson
110 d86a77f8 David Gibson
/*
 * Write @len bytes from @buf into guest memory at @addr (a transfer
 * from the device).  The cast drops const solely to reuse
 * dma_memory_rw()'s signature; the FROM_DEVICE direction writes to
 * guest memory, not to @buf.
 */
static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}
116 d86a77f8 David Gibson
117 e5332e63 David Gibson
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
118 e5332e63 David Gibson
                         dma_addr_t len);
119 e5332e63 David Gibson
120 d86a77f8 David Gibson
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);
121 d86a77f8 David Gibson
122 e5332e63 David Gibson
void *iommu_dma_memory_map(DMAContext *dma,
123 e5332e63 David Gibson
                           dma_addr_t addr, dma_addr_t *len,
124 e5332e63 David Gibson
                           DMADirection dir);
125 d86a77f8 David Gibson
static inline void *dma_memory_map(DMAContext *dma,
126 d86a77f8 David Gibson
                                   dma_addr_t addr, dma_addr_t *len,
127 d86a77f8 David Gibson
                                   DMADirection dir)
128 d86a77f8 David Gibson
{
129 e5332e63 David Gibson
    if (!dma_has_iommu(dma)) {
130 e5332e63 David Gibson
        target_phys_addr_t xlen = *len;
131 e5332e63 David Gibson
        void *p;
132 e5332e63 David Gibson
133 e5332e63 David Gibson
        p = cpu_physical_memory_map(addr, &xlen,
134 e5332e63 David Gibson
                                    dir == DMA_DIRECTION_FROM_DEVICE);
135 e5332e63 David Gibson
        *len = xlen;
136 e5332e63 David Gibson
        return p;
137 e5332e63 David Gibson
    } else {
138 e5332e63 David Gibson
        return iommu_dma_memory_map(dma, addr, len, dir);
139 e5332e63 David Gibson
    }
140 d86a77f8 David Gibson
}
141 d86a77f8 David Gibson
142 e5332e63 David Gibson
void iommu_dma_memory_unmap(DMAContext *dma,
143 e5332e63 David Gibson
                            void *buffer, dma_addr_t len,
144 e5332e63 David Gibson
                            DMADirection dir, dma_addr_t access_len);
145 d86a77f8 David Gibson
static inline void dma_memory_unmap(DMAContext *dma,
146 d86a77f8 David Gibson
                                    void *buffer, dma_addr_t len,
147 d86a77f8 David Gibson
                                    DMADirection dir, dma_addr_t access_len)
148 d86a77f8 David Gibson
{
149 e5332e63 David Gibson
    if (!dma_has_iommu(dma)) {
150 e5332e63 David Gibson
        return cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
151 e5332e63 David Gibson
                                         dir == DMA_DIRECTION_FROM_DEVICE,
152 e5332e63 David Gibson
                                         access_len);
153 e5332e63 David Gibson
    } else {
154 e5332e63 David Gibson
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
155 e5332e63 David Gibson
    }
156 d86a77f8 David Gibson
}
157 d86a77f8 David Gibson
158 d86a77f8 David Gibson
/*
 * Generate an endian-specific load/store pair for DMA, e.g.
 * lduw_le_dma()/stw_le_dma().  The load reads (_bits)/8 bytes with
 * dma_memory_read() and converts from _end (le/be) byte order to host
 * order; the store converts to _end order and writes with
 * dma_memory_write().  Any dma_memory_read() error leaves val
 * unspecified -- errors are not propagated, matching ldub_dma().
 */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(dma, addr, &val, (_bits) / 8);                  \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma,       \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(dma, addr, &val, (_bits) / 8);                 \
    }
173 d86a77f8 David Gibson
174 d86a77f8 David Gibson
/* Load a single byte from guest address @addr (read errors ignored). */
static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
{
    uint8_t byte;

    dma_memory_read(dma, addr, &byte, sizeof(byte));
    return byte;
}
181 d86a77f8 David Gibson
182 d86a77f8 David Gibson
/* Store the single byte @val at guest address @addr. */
static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(dma, addr, &val, sizeof(val));
}
186 d86a77f8 David Gibson
187 d86a77f8 David Gibson
/* Instantiate lduw/stw, ldl/stl and ldq/stq helpers in both
 * little-endian and big-endian flavours, then retire the macro. */
DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
195 d86a77f8 David Gibson
196 e5332e63 David Gibson
/* Fill in @dma's IOMMU callback table (see struct DMAContext). */
void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap);
198 e5332e63 David Gibson
199 10dc8aef Paolo Bonzini
/* One contiguous DMA region of a QEMUSGList. */
struct ScatterGatherEntry {
    dma_addr_t base;    /* starting bus address */
    dma_addr_t len;     /* length in bytes */
};
203 244ab90e aliguori
204 c65bcef3 David Gibson
/* Initialise @qsg for context @dma; @alloc_hint is presumably the
 * expected entry count used to size the initial allocation -- confirm
 * against the implementation. */
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
/* Append the region [base, base + len) to @qsg. */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
/* Release the entry storage owned by @qsg. */
void qemu_sglist_destroy(QEMUSGList *qsg);
#endif
208 244ab90e aliguori
209 cb144ccb Christoph Hellwig
/* Signature of the block-layer AIO routine driven by dma_bdrv_io()
 * (a bdrv_aio_readv/writev-style function). */
typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *iov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque);

/* Start asynchronous block I/O between @bs (from @sector_num) and the
 * scatter/gather list @sg, issuing the transfers through @io_func in
 * direction @dir; @cb/@opaque receive the completion. */
BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
217 59a703eb aliguori
/* Convenience wrappers: asynchronous read/write between @bs and @sg. */
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
/* Copy up to @len bytes between buffer @ptr and the s/g list @sg;
 * the uint64_t result is presumably the byte count transferred --
 * confirm against the implementation. */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

/* Begin block-accounting for a transfer covering @sg on @bs. */
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);
229 244ab90e aliguori
#endif