Statistics
| Branch: | Revision:

root / include / sysemu / dma.h @ cba933b2

History | View | Annotate | Download (7.4 kB)

1 244ab90e aliguori
/*
2 244ab90e aliguori
 * DMA helper functions
3 244ab90e aliguori
 *
4 244ab90e aliguori
 * Copyright (c) 2009 Red Hat
5 244ab90e aliguori
 *
6 244ab90e aliguori
 * This work is licensed under the terms of the GNU General Public License
7 244ab90e aliguori
 * (GNU GPL), version 2 or later.
8 244ab90e aliguori
 */
9 244ab90e aliguori
10 244ab90e aliguori
#ifndef DMA_H
11 244ab90e aliguori
#define DMA_H
12 244ab90e aliguori
13 244ab90e aliguori
#include <stdio.h>
14 022c62cb Paolo Bonzini
#include "exec/memory.h"
15 df32fd1c Paolo Bonzini
#include "exec/address-spaces.h"
16 1ad2134f Paul Brook
#include "hw/hw.h"
17 737e150e Paolo Bonzini
#include "block/block.h"
18 9c17d615 Paolo Bonzini
#include "sysemu/kvm.h"
19 244ab90e aliguori
20 10dc8aef Paolo Bonzini
/* Forward declaration; the struct is defined below for system emulation. */
typedef struct ScatterGatherEntry ScatterGatherEntry;
21 10dc8aef Paolo Bonzini
22 43cf8ae6 David Gibson
/*
 * Direction of a DMA transfer, named from the device's point of view:
 * TO_DEVICE reads guest memory (data flows towards the device),
 * FROM_DEVICE writes guest memory (data flows from the device).
 */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
26 43cf8ae6 David Gibson
27 fead0c24 Paolo Bonzini
/* A scatter/gather list: an array of bus-address ranges describing
 * one logical DMA transfer. */
struct QEMUSGList {
    ScatterGatherEntry *sg;   /* entry array */
    int nsg;                  /* entries in use -- presumably; see qemu_sglist_add() */
    int nalloc;               /* entries allocated */
    size_t size;              /* aggregate transfer size -- TODO confirm units vs impl */
    DeviceState *dev;         /* device on whose behalf the DMA is performed */
    AddressSpace *as;         /* address space the transfer targets */
};
35 fead0c24 Paolo Bonzini
36 4be403c8 Avi Kivity
#ifndef CONFIG_USER_ONLY
37 d9d1055e David Gibson
38 e5332e63 David Gibson
/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64             /* width of dma_addr_t */
#define DMA_ADDR_FMT "%" PRIx64      /* printf format for dma_addr_t */
49 e5332e63 David Gibson
50 df32fd1c Paolo Bonzini
/* Ordering barrier issued before every non-relaxed DMA access. */
static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    /* NOTE(review): the barrier is gated on kvm_enabled(); presumably
     * TCG execution does not need it here -- confirm, in particular
     * for multi-threaded TCG. */
    if (kvm_enabled()) {
        smp_mb();
    }
}
74 7a0bac4d Benjamin Herrenschmidt
75 d86a77f8 David Gibson
/* Checks that the given range of addresses is valid for DMA.  This is
76 d86a77f8 David Gibson
 * useful for certain cases, but usually you should just use
77 d86a77f8 David Gibson
 * dma_memory_{read,write}() and check for errors */
78 df32fd1c Paolo Bonzini
static inline bool dma_memory_valid(AddressSpace *as,
79 e5332e63 David Gibson
                                    dma_addr_t addr, dma_addr_t len,
80 e5332e63 David Gibson
                                    DMADirection dir)
81 d86a77f8 David Gibson
{
82 df32fd1c Paolo Bonzini
    return address_space_access_valid(as, addr, len,
83 24addbc7 Paolo Bonzini
                                      dir == DMA_DIRECTION_FROM_DEVICE);
84 d86a77f8 David Gibson
}
85 d86a77f8 David Gibson
86 df32fd1c Paolo Bonzini
static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr,
87 7a0bac4d Benjamin Herrenschmidt
                                        void *buf, dma_addr_t len,
88 7a0bac4d Benjamin Herrenschmidt
                                        DMADirection dir)
89 d86a77f8 David Gibson
{
90 df32fd1c Paolo Bonzini
    return address_space_rw(as, addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
91 d86a77f8 David Gibson
}
92 d86a77f8 David Gibson
93 df32fd1c Paolo Bonzini
/* Barrier-less DMA read of @len bytes at @addr into @buf
 * (data flows towards the device). */
static inline int dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, buf, len,
                                 DMA_DIRECTION_TO_DEVICE);
}
98 7a0bac4d Benjamin Herrenschmidt
99 df32fd1c Paolo Bonzini
/* Barrier-less DMA write of @len bytes from @buf to @addr
 * (data flows from the device into guest memory). */
static inline int dma_memory_write_relaxed(AddressSpace *as, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    /* The underlying API takes a non-const buffer even though the
     * write path only reads from it, hence the cast. */
    void *p = (void *)buf;

    return dma_memory_rw_relaxed(as, addr, p, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}
105 7a0bac4d Benjamin Herrenschmidt
106 df32fd1c Paolo Bonzini
static inline int dma_memory_rw(AddressSpace *as, dma_addr_t addr,
107 7a0bac4d Benjamin Herrenschmidt
                                void *buf, dma_addr_t len,
108 7a0bac4d Benjamin Herrenschmidt
                                DMADirection dir)
109 7a0bac4d Benjamin Herrenschmidt
{
110 df32fd1c Paolo Bonzini
    dma_barrier(as, dir);
111 7a0bac4d Benjamin Herrenschmidt
112 df32fd1c Paolo Bonzini
    return dma_memory_rw_relaxed(as, addr, buf, len, dir);
113 7a0bac4d Benjamin Herrenschmidt
}
114 7a0bac4d Benjamin Herrenschmidt
115 df32fd1c Paolo Bonzini
/* DMA read (with barrier) of @len bytes at @addr into @buf. */
static inline int dma_memory_read(AddressSpace *as, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(as, addr, buf, len,
                         DMA_DIRECTION_TO_DEVICE);
}
120 d86a77f8 David Gibson
121 df32fd1c Paolo Bonzini
/* DMA write (with barrier) of @len bytes from @buf to @addr. */
static inline int dma_memory_write(AddressSpace *as, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    /* Cast away const: the common rw path takes a non-const buffer
     * but the write direction only reads from it. */
    void *p = (void *)buf;

    return dma_memory_rw(as, addr, p, len, DMA_DIRECTION_FROM_DEVICE);
}
127 d86a77f8 David Gibson
128 df32fd1c Paolo Bonzini
/* Fill @len bytes of guest memory at @addr with the constant byte @c. */
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len);
129 d86a77f8 David Gibson
130 df32fd1c Paolo Bonzini
static inline void *dma_memory_map(AddressSpace *as,
131 d86a77f8 David Gibson
                                   dma_addr_t addr, dma_addr_t *len,
132 d86a77f8 David Gibson
                                   DMADirection dir)
133 d86a77f8 David Gibson
{
134 24addbc7 Paolo Bonzini
    hwaddr xlen = *len;
135 24addbc7 Paolo Bonzini
    void *p;
136 24addbc7 Paolo Bonzini
137 df32fd1c Paolo Bonzini
    p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE);
138 24addbc7 Paolo Bonzini
    *len = xlen;
139 24addbc7 Paolo Bonzini
    return p;
140 d86a77f8 David Gibson
}
141 d86a77f8 David Gibson
142 df32fd1c Paolo Bonzini
static inline void dma_memory_unmap(AddressSpace *as,
143 d86a77f8 David Gibson
                                    void *buffer, dma_addr_t len,
144 d86a77f8 David Gibson
                                    DMADirection dir, dma_addr_t access_len)
145 d86a77f8 David Gibson
{
146 df32fd1c Paolo Bonzini
    address_space_unmap(as, buffer, (hwaddr)len,
147 24addbc7 Paolo Bonzini
                        dir == DMA_DIRECTION_FROM_DEVICE, access_len);
148 d86a77f8 David Gibson
}
149 d86a77f8 David Gibson
150 d86a77f8 David Gibson
/* Generate a pair of endian-aware DMA load/store helpers, e.g.
 * DEFINE_LDST_DMA(uw, w, 16, le) produces lduw_le_dma() and
 * stw_le_dma().  Loads byte-swap the value read from guest memory to
 * host order; stores swap to the target endianness before writing.
 * (Comments cannot go inside the macro because of the \ continuations.) */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(as, addr, &val, (_bits) / 8);                   \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(AddressSpace *as,      \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(as, addr, &val, (_bits) / 8);                  \
    }
165 d86a77f8 David Gibson
166 df32fd1c Paolo Bonzini
/* Load one byte from guest memory over DMA. */
static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
{
    uint8_t byte;

    dma_memory_read(as, addr, &byte, sizeof(byte));
    return byte;
}
173 d86a77f8 David Gibson
174 df32fd1c Paolo Bonzini
/* Store one byte to guest memory over DMA. */
static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val)
{
    uint8_t byte = val;

    dma_memory_write(as, addr, &byte, sizeof(byte));
}
178 d86a77f8 David Gibson
179 d86a77f8 David Gibson
/* Instantiate the 16/32/64-bit little- and big-endian helpers:
 * lduw_le_dma/stw_le_dma, ldl_le_dma/stl_le_dma, ldq_le_dma/stq_le_dma
 * and their _be_ counterparts. */
DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
187 d86a77f8 David Gibson
188 10dc8aef Paolo Bonzini
/* One contiguous region of a scatter/gather list. */
struct ScatterGatherEntry {
    dma_addr_t base;   /* starting bus address of the region */
    dma_addr_t len;    /* length of the region */
};
192 244ab90e aliguori
193 f487b677 Paolo Bonzini
/* Initialize @qsg for DMA on behalf of @dev through address space @as;
 * @alloc_hint suggests how many entries to allocate up front. */
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as);
/* Append the region [@base, @base + @len) to @qsg. */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
/* Free the memory owned by @qsg. */
void qemu_sglist_destroy(QEMUSGList *qsg);
197 10dc8aef Paolo Bonzini
#endif
198 244ab90e aliguori
199 cb144ccb Christoph Hellwig
/* Callback type through which dma_bdrv_io() issues the underlying
 * block-layer I/O (e.g. a bdrv_aio_readv/writev-style function). */
typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *iov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque);

/* Start asynchronous block I/O covering the guest memory described by
 * @sg, invoking @cb with @opaque on completion.  @dir gives the DMA
 * direction from the device's point of view. */
BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
/* Convenience wrappers around dma_bdrv_io() for reads and writes. */
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
/* Copy up to @len bytes between the linear buffer @ptr and the guest
 * memory described by @sg.  NOTE(review): presumably the return value
 * is the number of bytes transferred and the read/write naming follows
 * the device's perspective -- confirm against dma-helpers.c. */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

/* Record block-accounting information for the transfer described by
 * @sg into @cookie. */
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);
219 244ab90e aliguori
#endif