Revision e5332e63 dma.h
--- a/dma.h
+++ b/dma.h
 };
 
 #if defined(TARGET_PHYS_ADDR_BITS)
-typedef target_phys_addr_t dma_addr_t;
 
-#define DMA_ADDR_BITS TARGET_PHYS_ADDR_BITS
-#define DMA_ADDR_FMT TARGET_FMT_plx
+/*
+ * When an IOMMU is present, bus addresses become distinct from
+ * CPU/memory physical addresses and may be a different size. Because
+ * the IOVA size depends more on the bus than on the platform, we more
+ * or less have to treat these as 64-bit always to cover all (or at
+ * least most) cases.
+ */
+typedef uint64_t dma_addr_t;
+
+#define DMA_ADDR_BITS 64
+#define DMA_ADDR_FMT "%" PRIx64
+
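Since dma_addr_t is now unconditionally 64-bit, DMA_ADDR_FMT always expands to "%" PRIx64 and bus addresses format the same way on every target. As a minimal sketch (the helper and its message are invented for illustration):

    #include <inttypes.h>   /* PRIx64, needed by DMA_ADDR_FMT */
    #include <stdio.h>

    /* Hypothetical helper: log a rejected bus address in a
     * target-independent way, whatever the physical address width. */
    static void report_bad_dma(dma_addr_t addr)
    {
        fprintf(stderr, "my-device: bad DMA address 0x" DMA_ADDR_FMT "\n",
                addr);
    }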
|
|
+typedef int DMATranslateFunc(DMAContext *dma,
+                             dma_addr_t addr,
+                             target_phys_addr_t *paddr,
+                             target_phys_addr_t *len,
+                             DMADirection dir);
+typedef void* DMAMapFunc(DMAContext *dma,
+                         dma_addr_t addr,
+                         dma_addr_t *len,
+                         DMADirection dir);
+typedef void DMAUnmapFunc(DMAContext *dma,
+                          void *buffer,
+                          dma_addr_t len,
+                          DMADirection dir,
+                          dma_addr_t access_len);
+
+struct DMAContext {
+    DMATranslateFunc *translate;
+    DMAMapFunc *map;
+    DMAUnmapFunc *unmap;
+};
+
+static inline bool dma_has_iommu(DMAContext *dma)
+{
+    return !!dma;
+}
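The three callback typedefs spell out the contract an IOMMU emulation has to meet: resolve an IOVA to a CPU physical address and clamp the length to the containing page or window. A rough sketch of a translate hook, with an invented page-table layout (MyIOMMUState, window_size, page_shift and friends are all hypothetical):

    /* Hypothetical translate callback: map a bus address (IOVA) to a CPU
     * physical address and shrink *len so the access stays within one page.
     * Returns 0 on success, non-zero when the IOVA fails to translate. */
    static int my_iommu_translate(DMAContext *dma, dma_addr_t addr,
                                  target_phys_addr_t *paddr,
                                  target_phys_addr_t *len,
                                  DMADirection dir)
    {
        MyIOMMUState *s = container_of(dma, MyIOMMUState, dma);
        dma_addr_t offset = addr & s->page_mask;

        if (addr >= s->window_size) {
            return -1;                      /* outside the DMA window */
        }
        *paddr = s->page_table[addr >> s->page_shift] | offset;
        *len = MIN(*len, s->page_size - offset);
        return 0;
    }

dma_has_iommu() then makes the convention explicit: a NULL DMAContext means "no translation", so every wrapper below can branch on it cheaply.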
|
 
 /* Checks that the given range of addresses is valid for DMA. This is
  * useful for certain cases, but usually you should just use
  * dma_memory_{read,write}() and check for errors */
-static inline bool dma_memory_valid(DMAContext *dma, dma_addr_t addr,
-                                    dma_addr_t len, DMADirection dir)
+bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
+                            DMADirection dir);
+static inline bool dma_memory_valid(DMAContext *dma,
+                                    dma_addr_t addr, dma_addr_t len,
+                                    DMADirection dir)
 {
-    /* Stub version, with no iommu we assume all bus addresses are valid */
-    return true;
+    if (!dma_has_iommu(dma)) {
+        return true;
+    } else {
+        return iommu_dma_memory_valid(dma, addr, len, dir);
+    }
 }
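As the comment says, most callers should simply perform the transfer and check for errors; pre-validation suits cases like vetting a ring before enabling a device. A hypothetical use (MyDevState and its fields are invented):

    /* Refuse to enable the device if its ring is not DMA-addressable,
     * rather than faulting in the middle of a later transfer. */
    static bool my_dev_ring_ok(MyDevState *s)
    {
        return dma_memory_valid(s->dma, s->ring_base, s->ring_size,
                                DMA_DIRECTION_TO_DEVICE);
    }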
|
+int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+                        void *buf, dma_addr_t len, DMADirection dir);
 static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                 void *buf, dma_addr_t len, DMADirection dir)
 {
-    /* Stub version when we have no iommu support */
-    cpu_physical_memory_rw(addr, buf, (target_phys_addr_t)len,
-                           dir == DMA_DIRECTION_FROM_DEVICE);
-    return 0;
+    if (!dma_has_iommu(dma)) {
+        /* Fast-path for no IOMMU */
+        cpu_physical_memory_rw(addr, buf, len,
+                               dir == DMA_DIRECTION_FROM_DEVICE);
+        return 0;
+    } else {
+        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
+    }
 }
 
 static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
...
                                DMA_DIRECTION_FROM_DEVICE);
 }
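Because dma_memory_rw() can now fail, the read/write wrappers above return a meaningful status where the old stub always returned 0, and callers are expected to check it. For instance (the descriptor type is invented):

    /* Hypothetical descriptor fetch: a non-zero return means the IOVA did
     * not translate, something the pre-IOMMU stub could never report. */
    static int my_dev_fetch_desc(MyDevState *s, dma_addr_t addr, MyDesc *desc)
    {
        if (dma_memory_read(s->dma, addr, desc, sizeof(*desc))) {
            return -1;      /* surface a device-level DMA error */
        }
        return 0;
    }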
|
+int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
+                         dma_addr_t len);
+
 int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);
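dma_memory_set() gains a matching out-of-line iommu_dma_memory_set() helper; presumably its non-inline definition dispatches on dma_has_iommu() the same way the inline wrappers do, though that body is not in this header. Usage is unchanged apart from the new failure case (status_base is an invented field):

    /* Zero a 64-byte status block in guest memory, checking for a
     * translation failure now that an IOMMU may sit in the path. */
    if (dma_memory_set(s->dma, s->status_base, 0, 64)) {
        /* handle the DMA error */
    }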
|
+void *iommu_dma_memory_map(DMAContext *dma,
+                           dma_addr_t addr, dma_addr_t *len,
+                           DMADirection dir);
 static inline void *dma_memory_map(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t *len,
                                    DMADirection dir)
 {
-    target_phys_addr_t xlen = *len;
-    void *p;
-
-    p = cpu_physical_memory_map(addr, &xlen,
-                                dir == DMA_DIRECTION_FROM_DEVICE);
-    *len = xlen;
-    return p;
+    if (!dma_has_iommu(dma)) {
+        target_phys_addr_t xlen = *len;
+        void *p;
+
+        p = cpu_physical_memory_map(addr, &xlen,
+                                    dir == DMA_DIRECTION_FROM_DEVICE);
+        *len = xlen;
+        return p;
+    } else {
+        return iommu_dma_memory_map(dma, addr, len, dir);
+    }
 }
|
+void iommu_dma_memory_unmap(DMAContext *dma,
+                            void *buffer, dma_addr_t len,
+                            DMADirection dir, dma_addr_t access_len);
 static inline void dma_memory_unmap(DMAContext *dma,
                                     void *buffer, dma_addr_t len,
                                     DMADirection dir, dma_addr_t access_len)
 {
-    return cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
-                                     dir == DMA_DIRECTION_FROM_DEVICE,
-                                     access_len);
+    if (!dma_has_iommu(dma)) {
+        return cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
+                                         dir == DMA_DIRECTION_FROM_DEVICE,
+                                         access_len);
+    } else {
+        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
+    }
 }
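The map/unmap pair keeps its usual contract: dma_memory_map() may shrink *len, and the caller reports the bytes actually touched back through access_len, which is what lets the IOMMU path write back and release any bounce buffer it had to substitute for direct access. The standard pattern, sketched with an invented fill_buffer() step:

    dma_addr_t mlen = size;
    void *ptr = dma_memory_map(s->dma, addr, &mlen,
                               DMA_DIRECTION_FROM_DEVICE);

    if (!ptr) {
        /* mapping failed; fall back to dma_memory_write(), for example */
    } else {
        fill_buffer(ptr, mlen);   /* hypothetical work; mlen may be < size */
        /* pass the length actually written so a bounce buffer is flushed */
        dma_memory_unmap(s->dma, ptr, mlen, DMA_DIRECTION_FROM_DEVICE, mlen);
    }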
|
 #define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
...
 
 #undef DEFINE_LDST_DMA
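The DEFINE_LDST_DMA body is elided in this view, but the generated accessors follow QEMU's usual ld/st naming, now taking the DMAContext as their first argument. Assuming the conventional 32-bit little-endian instantiation exists, a ring-pointer update would look like (accessor names assumed, MyRing invented):

    uint32_t head = ldl_le_dma(s->dma,
                               s->ring_base + offsetof(MyRing, head));
    stl_le_dma(s->dma, s->ring_base + offsetof(MyRing, tail), s->tail);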
|
+void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+                      DMAMapFunc map, DMAUnmapFunc unmap);
+
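An IOMMU implementation publishes its hooks through dma_context_init(); a bus without translation simply hands devices a NULL DMAContext, which is exactly what dma_has_iommu() tests. A sketch reusing the hypothetical translate callback from above (whether NULL is accepted for map/unmap to get generic bounce-buffer behaviour is not visible in this hunk, so treat that as an assumption):

    static void my_iommu_init(MyIOMMUState *s)
    {
        /* Assumption: NULL map/unmap fall back to a generic
         * implementation built on the translate hook. */
        dma_context_init(&s->dma, my_iommu_translate, NULL, NULL);
    }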
|
|
 struct ScatterGatherEntry {
     dma_addr_t base;
     dma_addr_t len;