Revision e5332e63

b/dma-helpers.c
9 9

  
10 10
#include "dma.h"
11 11
#include "trace.h"
12
#include "range.h"
13
#include "qemu-thread.h"
12 14

  
13
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
15
/* #define DEBUG_IOMMU */
16

  
17
static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
14 18
{
15 19
#define FILLBUF_SIZE 512
16 20
    uint8_t fillbuf[FILLBUF_SIZE];
......
23 27
        len -= len;
24 28
        addr += len;
25 29
    }
30
}
31

  
32
/*
 * Fill a guest-visible DMA region with the constant byte 'c'.
 *
 * Without an IOMMU the fill goes straight to physical memory;
 * otherwise it is routed through the IOMMU translation path, which
 * can fail.  Returns 0 on success, non-zero on translation error.
 */
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    if (!dma_has_iommu(dma)) {
        /* Fast path: bus addresses are physical addresses. */
        do_dma_memory_set(addr, c, len);
        return 0;
    }

    return iommu_dma_memory_set(dma, addr, c, len);
}
28 41

  
......
260 273
{
261 274
    bdrv_acct_start(bs, cookie, sg->size, type);
262 275
}
276

  
277
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
278
                            DMADirection dir)
279
{
280
    target_phys_addr_t paddr, plen;
281

  
282
#ifdef DEBUG_IOMMU
283
    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
284
            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
285
#endif
286

  
287
    while (len) {
288
        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
289
            return false;
290
        }
291

  
292
        /* The translation might be valid for larger regions. */
293
        if (plen > len) {
294
            plen = len;
295
        }
296

  
297
        len -= plen;
298
        addr += plen;
299
    }
300

  
301
    return true;
302
}
303

  
304
/*
 * Copy data between a device buffer and guest memory through the
 * IOMMU, one translated chunk at a time.
 *
 * Returns 0 on success, or -1 if any part of the range fails to
 * translate (in which case the destination buffer is zeroed for
 * device reads, see below).
 */
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir)
{
    target_phys_addr_t phys, chunk;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        if (dma->translate(dma, addr, &phys, &chunk, dir) != 0) {
            /*
             * In case of failure on reads from the guest, we clean the
             * destination buffer so that a device that doesn't test
             * for errors will not expose qemu internal memory.
             */
            memset(buf, 0, len);
            return -1;
        }

        /* The translation might be valid for larger regions. */
        if (chunk > len) {
            chunk = len;
        }

        cpu_physical_memory_rw(phys, buf, chunk,
                               dir == DMA_DIRECTION_FROM_DEVICE);

        len -= chunk;
        addr += chunk;
        buf += chunk;
    }

    return 0;
}
342

  
343
/*
 * memset() a guest DMA range via the IOMMU.  The range is translated
 * piecewise and each physical chunk is filled with the byte 'c'.
 *
 * Returns 0 on success, or the translate callback's error code on
 * failure.
 */
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len)
{
    target_phys_addr_t phys, chunk;
    int ret;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
#endif

    while (len) {
        ret = dma->translate(dma, addr, &phys, &chunk,
                             DMA_DIRECTION_FROM_DEVICE);
        if (ret) {
            return ret;
        }

        /* A mapping may extend past the remaining range; clamp it. */
        if (chunk > len) {
            chunk = len;
        }

        do_dma_memory_set(phys, c, chunk);

        len -= chunk;
        addr += chunk;
    }

    return 0;
}
374

  
375
void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
376
                      DMAMapFunc map, DMAUnmapFunc unmap)
377
{
378
#ifdef DEBUG_IOMMU
379
    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
380
            dma, translate, map, unmap);
381
#endif
382
    dma->translate = translate;
383
    dma->map = map;
384
    dma->unmap = unmap;
385
}
386

  
387
void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
388
                           DMADirection dir)
389
{
390
    int err;
391
    target_phys_addr_t paddr, plen;
392
    void *buf;
393

  
394
    if (dma->map) {
395
        return dma->map(dma, addr, len, dir);
396
    }
397

  
398
    plen = *len;
399
    err = dma->translate(dma, addr, &paddr, &plen, dir);
400
    if (err) {
401
        return NULL;
402
    }
403

  
404
    /*
405
     * If this is true, the virtual region is contiguous,
406
     * but the translated physical region isn't. We just
407
     * clamp *len, much like cpu_physical_memory_map() does.
408
     */
409
    if (plen < *len) {
410
        *len = plen;
411
    }
412

  
413
    buf = cpu_physical_memory_map(paddr, &plen,
414
                                  dir == DMA_DIRECTION_FROM_DEVICE);
415
    *len = plen;
416

  
417
    return buf;
418
}
419

  
420
void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
421
                            DMADirection dir, dma_addr_t access_len)
422
{
423
    if (dma->unmap) {
424
        dma->unmap(dma, buffer, len, dir, access_len);
425
        return;
426
    }
427

  
428
    cpu_physical_memory_unmap(buffer, len,
429
                              dir == DMA_DIRECTION_FROM_DEVICE,
430
                              access_len);
431

  
432
}
b/dma.h
31 31
};
32 32

  
33 33
#if defined(TARGET_PHYS_ADDR_BITS)
/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64
46

  
47
typedef int DMATranslateFunc(DMAContext *dma,
48
                             dma_addr_t addr,
49
                             target_phys_addr_t *paddr,
50
                             target_phys_addr_t *len,
51
                             DMADirection dir);
52
typedef void* DMAMapFunc(DMAContext *dma,
53
                         dma_addr_t addr,
54
                         dma_addr_t *len,
55
                         DMADirection dir);
56
typedef void DMAUnmapFunc(DMAContext *dma,
57
                          void *buffer,
58
                          dma_addr_t len,
59
                          DMADirection dir,
60
                          dma_addr_t access_len);
61

  
62
struct DMAContext {
63
    DMATranslateFunc *translate;
64
    DMAMapFunc *map;
65
    DMAUnmapFunc *unmap;
66
};
67

  
68
static inline bool dma_has_iommu(DMAContext *dma)
69
{
70
    return !!dma;
71
}
38 72

  
39 73
/* Checks that the given range of addresses is valid for DMA.  This is
40 74
 * useful for certain cases, but usually you should just use
41 75
 * dma_memory_{read,write}() and check for errors */
42
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir);
static inline bool dma_memory_valid(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* No IOMMU: every bus address is a valid physical address. */
        return true;
    } else {
        return iommu_dma_memory_valid(dma, addr, len, dir);
    }
}
48 88

  
89
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir);
static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len, DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* Fast-path for no IOMMU */
        cpu_physical_memory_rw(addr, buf, len,
                               dir == DMA_DIRECTION_FROM_DEVICE);
        return 0;
    } else {
        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
    }
}
57 103

  
58 104
static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
......
68 114
                         DMA_DIRECTION_FROM_DEVICE);
69 115
}
70 116

  
117
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);
72 121

  
122
void *iommu_dma_memory_map(DMAContext *dma,
123
                           dma_addr_t addr, dma_addr_t *len,
124
                           DMADirection dir);
73 125
static inline void *dma_memory_map(DMAContext *dma,
74 126
                                   dma_addr_t addr, dma_addr_t *len,
75 127
                                   DMADirection dir)
76 128
{
77
    target_phys_addr_t xlen = *len;
78
    void *p;
79

  
80
    p = cpu_physical_memory_map(addr, &xlen,
81
                                dir == DMA_DIRECTION_FROM_DEVICE);
82
    *len = xlen;
83
    return p;
129
    if (!dma_has_iommu(dma)) {
130
        target_phys_addr_t xlen = *len;
131
        void *p;
132

  
133
        p = cpu_physical_memory_map(addr, &xlen,
134
                                    dir == DMA_DIRECTION_FROM_DEVICE);
135
        *len = xlen;
136
        return p;
137
    } else {
138
        return iommu_dma_memory_map(dma, addr, len, dir);
139
    }
84 140
}
85 141

  
142
void iommu_dma_memory_unmap(DMAContext *dma,
143
                            void *buffer, dma_addr_t len,
144
                            DMADirection dir, dma_addr_t access_len);
86 145
static inline void dma_memory_unmap(DMAContext *dma,
87 146
                                    void *buffer, dma_addr_t len,
88 147
                                    DMADirection dir, dma_addr_t access_len)
89 148
{
90
    return cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
91
                                     dir == DMA_DIRECTION_FROM_DEVICE,
92
                                     access_len);
149
    if (!dma_has_iommu(dma)) {
150
        return cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
151
                                         dir == DMA_DIRECTION_FROM_DEVICE,
152
                                         access_len);
153
    } else {
154
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
155
    }
93 156
}
94 157

  
95 158
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
......
130 193

  
131 194
#undef DEFINE_LDST_DMA
132 195

  
196
void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
197
                      DMAMapFunc map, DMAUnmapFunc unmap);
198

  
133 199
struct ScatterGatherEntry {
134 200
    dma_addr_t base;
135 201
    dma_addr_t len;
b/hw/qdev-dma.h
6 6
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 7
 * See the COPYING file in the top-level directory.
8 8
 */
9
/* dma_addr_t is a fixed 64-bit quantity, so a DMA address property is
 * just a 64-bit hex property. */
#define DEFINE_PROP_DMAADDR(_n, _s, _f, _d)                               \
    DEFINE_PROP_HEX64(_n, _s, _f, _d)

Also available in: Unified diff