qemu-barrier.h @ 7c9958b0
#ifndef __QEMU_BARRIER_H
#define __QEMU_BARRIER_H 1

/* Compiler barrier */
#define barrier()   asm volatile("" ::: "memory")
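
/*
 * Illustrative sketch (not part of the original header): barrier()
 * constrains only the compiler, not the CPU.  A typical use is a
 * polling loop, where the "memory" clobber forces the flag to be
 * reloaded on every iteration:
 *
 *     extern int done;      // hypothetical flag set by another thread
 *
 *     static void wait_for_done(void)
 *     {
 *         while (!done) {
 *             barrier();    // compiler must re-read 'done' here
 *         }
 *     }
 */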

#if defined(__i386__)

/*
 * Because of the strongly ordered x86 storage model, smp_wmb() and
 * smp_rmb() are no-ops on x86 (a compiler barrier only), at least as
 * long as qemu doesn't access write-combining memory or use
 * non-temporal loads/stores from C code.
 */
#define smp_wmb()   barrier()
#define smp_rmb()   barrier()
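
/*
 * Sketch of the intended pairing (illustrative, not from the original
 * header): a producer makes data visible before setting a flag, and a
 * consumer checks the flag before reading the data.  On x86 both
 * barriers reduce to a compiler barrier:
 *
 *     producer:  data = 42;  smp_wmb();  flag = 1;
 *     consumer:  if (flag) { smp_rmb();  val = data; }
 */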

/*
 * We use the GCC builtin if it's available, as that can emit mfence on
 * 32 bit as well, e.g. if built with -march=pentium-m.  However, on
 * i386 there seem to be known bugs as recently as GCC 4.3, so we
 * require at least 4.4.
 */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
#define smp_mb() __sync_synchronize()
#else
#define smp_mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#endif
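
/*
 * Why smp_mb() needs a real instruction even on x86 (illustrative
 * sketch): x86 can reorder a store with a later load from a different
 * location, so Dekker-style synchronization needs a full barrier where
 * smp_wmb()/smp_rmb() would be no-ops:
 *
 *     CPU0:  x = 1;  smp_mb();  r0 = y;
 *     CPU1:  y = 1;  smp_mb();  r1 = x;
 *
 * With the barriers, r0 == 0 && r1 == 0 cannot be observed; without
 * them it can.
 */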

#elif defined(__x86_64__)

#define smp_wmb()   barrier()
#define smp_rmb()   barrier()
#define smp_mb()    asm volatile("mfence" ::: "memory")

#elif defined(_ARCH_PPC)

/*
 * We use eieio() for wmb() on powerpc.  This assumes we don't need to
 * order cacheable and non-cacheable stores with respect to each other.
 */
#define smp_wmb()   asm volatile("eieio" ::: "memory")

#if defined(__powerpc64__)
#define smp_rmb()   asm volatile("lwsync" ::: "memory")
#else
#define smp_rmb()   asm volatile("sync" ::: "memory")
#endif

#define smp_mb()    asm volatile("sync" ::: "memory")

#else

/*
 * For (host) platforms we don't have explicit barrier definitions for,
 * we use the gcc __sync_synchronize() primitive to generate a full
 * barrier.  This should be safe on all platforms, though it may be
 * overkill for smp_wmb() and smp_rmb().
 */
#define smp_wmb()   __sync_synchronize()
#define smp_rmb()   __sync_synchronize()
#define smp_mb()    __sync_synchronize()

#endif

#endif /* __QEMU_BARRIER_H */