Statistics
| Branch: | Revision:

root / qemu-lock.h @ 2a424990

History | View | Annotate | Download (6 kB)

1 d5975363 pbrook
/*
2 d5975363 pbrook
 *  Copyright (c) 2003 Fabrice Bellard
3 d5975363 pbrook
 *
4 d5975363 pbrook
 * This library is free software; you can redistribute it and/or
5 d5975363 pbrook
 * modify it under the terms of the GNU Lesser General Public
6 d5975363 pbrook
 * License as published by the Free Software Foundation; either
7 d5975363 pbrook
 * version 2 of the License, or (at your option) any later version.
8 d5975363 pbrook
 *
9 d5975363 pbrook
 * This library is distributed in the hope that it will be useful,
10 d5975363 pbrook
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 d5975363 pbrook
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12 d5975363 pbrook
 * Lesser General Public License for more details.
13 d5975363 pbrook
 *
14 d5975363 pbrook
 * You should have received a copy of the GNU Lesser General Public
15 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
16 d5975363 pbrook
 */
17 d5975363 pbrook
18 d5975363 pbrook
/* Locking primitives.  Most of this code should be redundant -
19 d5975363 pbrook
   system emulation doesn't need/use locking, NPTL userspace uses
20 d5975363 pbrook
   pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
21 d5975363 pbrook
   In either case a spinlock is probably the wrong kind of lock.
22 d5975363 pbrook
   Spinlocks are only good if you know another CPU has the lock and is
23 d5975363 pbrook
   likely to release it soon.  In environments where you have more threads
24 d5975363 pbrook
   than physical CPUs (the extreme case being a single CPU host) a spinlock
25 d5975363 pbrook
   simply wastes CPU until the OS decides to preempt it.  */
26 2f7bb878 Juan Quintela
/* With NPTL, guest threads are real host threads, so the spinlock API
   maps directly onto pthread mutexes.  */
#if defined(CONFIG_USE_NPTL)

#include <pthread.h>
#define spin_lock pthread_mutex_lock
#define spin_unlock pthread_mutex_unlock
#define spinlock_t pthread_mutex_t
#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER

#else

/* PA-RISC's ldcw instruction needs a 16-byte-aligned lock word, so the
   lock is an array of four ints from which a suitably aligned word is
   selected at runtime (see ldcw_align in this file).  On this
   architecture 1 means "unlocked".  */
#if defined(__hppa__)

typedef int spinlock_t[4];

#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }
41 d5975363 pbrook
42 c227f099 Anthony Liguori
/* Release the lock: mark all four candidate lock words free
   (1 == unlocked for the PA-RISC ldcw convention used here).  */
static inline void resetlock (spinlock_t *p)
{
    int i;
    for (i = 0; i < 4; i++)
        (*p)[i] = 1;
}
46 d5975363 pbrook
47 d5975363 pbrook
#else

/* All other architectures: the lock is a single int, 0 == unlocked.  */
typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0
52 d5975363 pbrook
53 c227f099 Anthony Liguori
/* Release the lock by storing the unlocked value back into it.  */
static inline void resetlock (spinlock_t *lock)
{
    lock[0] = SPIN_LOCK_UNLOCKED;
}
57 d5975363 pbrook
58 d5975363 pbrook
#endif
59 d5975363 pbrook
60 e58ffeb3 malc
#if defined(_ARCH_PPC)
61 d5975363 pbrook
/* Atomically set *p to 1 via lwarx/stwcx. load-reserve/store-conditional
   and return the previous value (0 when the lock was acquired).  */
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "      lwarx %0,0,%1\n"   /* load-reserve old value */
                          "      xor. %0,%3,%0\n"   /* ret = old ^ 0, sets cr0 */
                          "      bne $+12\n"        /* already nonzero: give up */
                          "      stwcx. %2,0,%1\n"  /* try to store 1 */
                          "      bne- $-16\n"       /* reservation lost: retry */
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
75 d5975363 pbrook
#elif defined(__i386__)
76 d5975363 pbrook
/* Try to take the lock: lock-prefixed cmpxchg compares *p with 0 and,
   if equal, stores 1.  Returns 0 when the lock was acquired, otherwise
   the nonzero old value of *p.  */
static inline int testandset (int *p)
{
    long int expected_free = 0;   /* goes in eax; cmpxchg writes old *p here */

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (expected_free)
                          : "r" (1)
                          : "cc");
    return expected_free;
}
86 d5975363 pbrook
#elif defined(__x86_64__)
87 d5975363 pbrook
/* x86_64 variant: identical to the i386 one.  Atomically compare *p
   with 0 and store 1 on match; returns 0 on acquisition, else the old
   nonzero value.  */
static inline int testandset (int *p)
{
    long int expected_free = 0;   /* eax input/output for cmpxchg */

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (expected_free)
                          : "r" (1)
                          : "cc");
    return expected_free;
}
97 d5975363 pbrook
#elif defined(__s390__)
98 d5975363 pbrook
/* Atomically set *p to 1 via compare-and-swap; returns the previous
   value of *p (0 when the lock was acquired).  */
static inline int testandset (int *p)
{
    int ret;

    /* cs compares ret (initialised to *p via "0" (*p)) with the word at
       p and stores 1 on match; "jl 0b" retries after a lost race, with
       cs having reloaded ret with the current value.  */
    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
109 d5975363 pbrook
#elif defined(__alpha__)
110 d5975363 pbrook
/* Alpha LL/SC test-and-set: atomically set *p to 1 and return the value
   that ldl_l observed (0 when the lock was free).  The failure branch
   lives in .subsection 2 to keep the retry off the fast path.
   NOTE(review): unlike the sparc/s390/mips variants in this file there
   is no "memory" clobber here -- confirm whether a compiler barrier is
   required on acquisition.  */
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0:        mov 1,%2\n"
                          "        ldl_l %0,%1\n"
                          "        stl_c %2,%1\n"
                          "        beq %2,1f\n"
                          ".subsection 2\n"
                          "1:        br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
126 d5975363 pbrook
#elif defined(__sparc__)
127 d5975363 pbrook
/* ldstub atomically loads the byte at [p] and stores 0xff there.
   Returns 0 when the lock was free (acquired), 1 when already held.  */
static inline int testandset (int *p)
{
        int ret;

        __asm__ __volatile__("ldstub        [%1], %0"
                             : "=r" (ret)
                             : "r" (p)
                             : "memory");

        /* normalise the loaded byte to a 0/1 result */
        return (ret ? 1 : 0);
}
138 d5975363 pbrook
#elif defined(__arm__)
139 d5975363 pbrook
/* Atomically swap 1 into *spinlock using the legacy ARM swp
   instruction (pre-ARMv6) and return the previous value
   (0 when the lock was acquired).  */
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    /* "memory" clobber added: acquiring a lock must also act as a
       compiler barrier so memory accesses are not reordered across it,
       matching the sparc/s390/mips variants in this file.  */
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock)
                         : "memory");

    return ret;
}
148 d5975363 pbrook
#elif defined(__mc68000)
149 d5975363 pbrook
/* m68k tas: sets the high bit of its byte operand and the condition
   codes from the old value; sne then makes ret nonzero when the byte
   was already set (lock held), 0 when it was free.  */
static inline int testandset (int *p)
{
    char ret;
    /* NOTE(review): the input operand is "m" (p), which applies tas to
       the pointer variable itself rather than to the lock word *p --
       confirm whether "m" (*p) was intended.  */
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc","memory");
    return ret;
}
158 d5975363 pbrook
#elif defined(__hppa__)
159 d5975363 pbrook
160 d5975363 pbrook
/* Because malloc only guarantees 8-byte alignment for malloc'd data,
161 d5975363 pbrook
   and GCC only guarantees 8-byte alignment for stack locals, we can't
162 d5975363 pbrook
   be assured of 16-byte alignment for atomic lock data even if we
163 d5975363 pbrook
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
164 d5975363 pbrook
   we use a struct containing an array of four ints for the atomic lock
165 d5975363 pbrook
   type and dynamically select the 16-byte aligned int from the array
166 d5975363 pbrook
   for the semaphore.  */
167 d5975363 pbrook
/* ldcw requires its operand to be 16-byte aligned.  */
#define __PA_LDCW_ALIGNMENT 16

/* Round p up to the next __PA_LDCW_ALIGNMENT boundary (a pointer that
   is already aligned is returned unchanged).  */
static inline void *ldcw_align (void *p) {
    unsigned long addr = (unsigned long)p;
    unsigned long misalign = addr & (__PA_LDCW_ALIGNMENT - 1);
    if (misalign)
        addr += __PA_LDCW_ALIGNMENT - misalign;
    return (void *)addr;
}
173 d5975363 pbrook
174 c227f099 Anthony Liguori
/* PA-RISC acquire attempt: pick the 16-byte-aligned word inside the
   spinlock_t array, then ldcw atomically loads it and stores 0.  The
   loaded value is nonzero when the lock was free (1 == unlocked here),
   so the result is inverted to match the other architectures:
   0 == acquired, nonzero == already held.  */
static inline int testandset (spinlock_t *p)
{
    unsigned int ret;
    p = ldcw_align(p);
    __asm__ __volatile__("ldcw 0(%1),%0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory" );
    return !ret;
}
184 d5975363 pbrook
185 d5975363 pbrook
#elif defined(__ia64)
186 d5975363 pbrook
187 d5975363 pbrook
#include <ia64intrin.h>
188 d5975363 pbrook
189 d5975363 pbrook
/* Acquire attempt via the compiler's atomic exchange builtin: set *p
   to 1 and return its previous value (0 when the lock was free).  */
static inline int testandset (int *p)
{
    int previous = __sync_lock_test_and_set (p, 1);
    return previous;
}
193 d5975363 pbrook
#elif defined(__mips__)
194 d5975363 pbrook
/* MIPS LL/SC test-and-set: atomically set *p to 1 and return the value
   ll observed (0 when the lock was acquired); retries until the
   store-conditional succeeds.  Uses $1 ($at) as scratch, hence
   .set noat.  */
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "        .set push                \n"
        "        .set noat                \n"
        "        .set mips2                \n"
        "1:        li        $1, 1                \n"
        "        ll        %0, %1                \n"
        "        sc        $1, %1                \n"
        "        beqz        $1, 1b                \n"
        "        .set pop                "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
213 d5975363 pbrook
#else
214 d5975363 pbrook
#error unimplemented CPU support
215 d5975363 pbrook
#endif
216 d5975363 pbrook
217 d5975363 pbrook
#if defined(CONFIG_USER_ONLY)
218 c227f099 Anthony Liguori
/* Busy-wait until the lock is acquired (testandset returns 0).  */
static inline void spin_lock(spinlock_t *lock)
{
    for (;;) {
        if (!testandset(lock))
            break;
    }
}
222 d5975363 pbrook
223 c227f099 Anthony Liguori
/* Release a lock previously taken with spin_lock()/spin_trylock().  */
static inline void spin_unlock(spinlock_t *lock)
{
    resetlock(lock);
}
227 d5975363 pbrook
228 c227f099 Anthony Liguori
/* Single acquisition attempt: returns nonzero on success, 0 when the
   lock is already held.  Never blocks.  */
static inline int spin_trylock(spinlock_t *lock)
{
    return testandset(lock) == 0;
}
232 d5975363 pbrook
#else
233 c227f099 Anthony Liguori
/* Non-CONFIG_USER_ONLY build: locking is deliberately a no-op (see the
   comment at the top of this file).  */
static inline void spin_lock(spinlock_t *lock)
{
}
236 d5975363 pbrook
237 c227f099 Anthony Liguori
/* No-op counterpart of the no-op spin_lock in this build.  */
static inline void spin_unlock(spinlock_t *lock)
{
}
240 d5975363 pbrook
241 c227f099 Anthony Liguori
/* Always reports success in the no-op build.  */
static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
245 d5975363 pbrook
#endif
246 d5975363 pbrook
247 d5975363 pbrook
#endif