
qemu-barrier.h @ 463ce4ae


#ifndef __QEMU_BARRIER_H
#define __QEMU_BARRIER_H 1

/* Compiler barrier */
#define barrier()   asm volatile("" ::: "memory")

#if defined(__i386__) || defined(__x86_64__)

/*
 * Because of the strongly ordered x86 storage model, wmb() is a nop
 * on x86 (a compiler barrier only), at least as long as qemu doesn't
 * access write-combining memory or use non-temporal load/stores from
 * C code.
 */
#define smp_wmb()   barrier()

#elif defined(_ARCH_PPC)

/*
 * We use eieio for wmb() on powerpc.  This assumes we don't need to
 * order cacheable and non-cacheable stores with respect to each
 * other.
 */
#define smp_wmb()   asm volatile("eieio" ::: "memory")

#else

/*
 * For (host) platforms for which we don't have explicit barrier
 * definitions, we use the gcc __sync_synchronize() primitive to
 * generate a full barrier.  This should be safe on all platforms,
 * though it may be overkill.
 */
#define smp_wmb()   __sync_synchronize()

#endif

#endif /* __QEMU_BARRIER_H */
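
As a usage illustration (a hypothetical sketch, not part of this header or of QEMU): smp_wmb() is the classic publish barrier in a single-producer, single-consumer ring, ordering the payload store before the index store that makes it visible to the consumer. The ring structure and function names below are invented for the example, and it assumes qemu-barrier.h is on the include path.

/*
 * Usage sketch (hypothetical, not QEMU code): single-producer,
 * single-consumer ring.  smp_wmb() ensures the payload store is
 * visible before the head-index store that publishes it.
 */
#include "qemu-barrier.h"

#define RING_SIZE 256

struct ring {
    volatile unsigned int head;   /* publish index, producer-written */
    int data[RING_SIZE];
};

static void ring_push(struct ring *r, int value)
{
    r->data[r->head % RING_SIZE] = value;
    smp_wmb();      /* payload must be visible before the new index */
    r->head++;
}

Note that the consumer side of such a ring would want a matching read barrier (an smp_rmb()) between loading the index and loading the data on weakly ordered hosts; this header defines only the write-side primitive.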