Revision 7a0bac4d dma.h
b/dma.h | ||
---|---|---|
13 | 13 |
#include <stdio.h> |
14 | 14 |
#include "hw/hw.h" |
15 | 15 |
#include "block.h" |
16 |
#include "kvm.h" |
|
16 | 17 |
|
17 | 18 |
typedef struct DMAContext DMAContext; |
18 | 19 |
typedef struct ScatterGatherEntry ScatterGatherEntry; |
... | ... | |
65 | 66 |
DMAUnmapFunc *unmap; |
66 | 67 |
}; |
67 | 68 |
|
69 |
static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * Called ahead of every DMA read and write (except through the
     * _relaxed variants); responsible for giving some sane ordering
     * of those accesses with respect to concurrently running VCPUs.
     *
     * Callers of map(), unmap() and the lower level st/ld_* helpers
     * must supply their own ordering with explicit barriers.
     *
     * This simple implementation issues a full smp_mb() in front of
     * each operation, which gives essentially complete ordering.  A
     * smarter version could pick lighter barriers depending on the
     * transfer direction, the DMA context, and so on.
     */
    if (!kvm_enabled()) {
        return;
    }
    smp_mb();
}
|
93 |
|
|
68 | 94 |
static inline bool dma_has_iommu(DMAContext *dma) |
69 | 95 |
{ |
70 | 96 |
return !!dma; |
... | ... | |
88 | 114 |
|
89 | 115 |
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr, |
90 | 116 |
void *buf, dma_addr_t len, DMADirection dir); |
91 |
static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr, |
|
92 |
void *buf, dma_addr_t len, DMADirection dir) |
|
117 |
static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr, |
|
118 |
void *buf, dma_addr_t len, |
|
119 |
DMADirection dir) |
|
93 | 120 |
{ |
94 | 121 |
if (!dma_has_iommu(dma)) { |
95 | 122 |
/* Fast-path for no IOMMU */ |
... | ... | |
101 | 128 |
} |
102 | 129 |
} |
103 | 130 |
|
131 |
/*
 * Transfer @len bytes of guest memory at @addr into @buf in the
 * TO_DEVICE direction, without the implicit ordering barrier that
 * dma_memory_read() provides.
 */
static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    const DMADirection dir = DMA_DIRECTION_TO_DEVICE;

    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
}
|
136 |
|
|
137 |
/*
 * Transfer @len bytes from @buf into guest memory at @addr in the
 * FROM_DEVICE direction, without the implicit ordering barrier that
 * dma_memory_write() provides.
 */
static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    /* dma_memory_rw_relaxed() takes a non-const buffer, so drop const. */
    void *p = (void *)buf;

    return dma_memory_rw_relaxed(dma, addr, p, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}
|
143 |
|
|
144 |
static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr, |
|
145 |
void *buf, dma_addr_t len, |
|
146 |
DMADirection dir) |
|
147 |
{ |
|
148 |
dma_barrier(dma, dir); |
|
149 |
|
|
150 |
return dma_memory_rw_relaxed(dma, addr, buf, len, dir); |
|
151 |
} |
|
152 |
|
|
104 | 153 |
static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr, |
105 | 154 |
void *buf, dma_addr_t len) |
106 | 155 |
{ |
Also available in: Unified diff