/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "cpu-common.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "softfloat.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
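
/* Illustrative sketch: tswap32() byte swaps only when host and target
 * endianness differ, so a value held in guest memory in target byte order
 * can be brought into host order like this (guest_ptr is a hypothetical,
 * suitably aligned host pointer):
 *
 *     uint32_t raw = *(uint32_t *)guest_ptr;  // bytes in target order
 *     uint32_t val = tswap32(raw);            // host order; unchanged if
 *                                             // endianness already matches
 */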

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian ! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
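
/* Illustrative sketch: CPU_DoubleU gives word and bit level access to a
 * float64, with l.upper/l.lower ordered to match the host (or FPA) layout
 * (d is a hypothetical float64 value):
 *
 *     CPU_DoubleU u;
 *     u.d = d;
 *     uint32_t hi = u.l.upper;   // most significant 32 bits
 *     uint32_t lo = u.l.lower;   // least significant 32 bits
 *     uint64_t bits = u.ll;      // raw 64 bit pattern
 */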

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
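/* For example, under this syntax:
 *   ldub_p(p)      : load an unsigned 8 bit value from host pointer p
 *   ldsw_be_p(p)   : load a signed 16 bit big endian value from host pointer p
 *   stl_le_p(p, v) : store v as a 32 bit little endian value at host pointer p
 *   ldl_kernel(a)  : load a 32 bit value at address a with a kernel mode access
 */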
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a,b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a<<32)|b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(__ret == (abi_ulong)__ret); \
    (abi_ulong)__ret; \
})
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest == (abi_ulong)__guest); \
})
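
/* Illustrative sketch (guest_addr and host_ptr are hypothetical variables):
 *
 *     void *host_ptr = g2h(guest_addr);    // guest address -> host pointer
 *     if (h2g_valid(host_ptr)) {
 *         abi_ulong back = h2g(host_ptr);  // host pointer -> guest address
 *     }
 */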

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
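
/* Illustrative arithmetic, assuming TARGET_PAGE_BITS == 12 (4 KiB pages):
 *   TARGET_PAGE_SIZE           = 0x1000
 *   TARGET_PAGE_MASK           = ~0xfff
 *   TARGET_PAGE_ALIGN(0x12345) = (0x12345 + 0xfff) & ~0xfff = 0x13000
 */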

/* ??? These should be the larger of unsigned long and target_ulong.  */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED  0x0020

void page_dump(FILE *f);
int walk_memory_regions(void *,
    int (*fn)(void *, unsigned long, unsigned long, unsigned long));
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);

void cpu_exec_init_all(unsigned long tb_size);
CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics (CPUState *env, FILE *f,
                          int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                          int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)));
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int64_t qemu_icount;
extern int use_icount;

#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10 /* Fast interrupt pending.  */
#define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occurred.  */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
#define CPU_INTERRUPT_INIT   0x400 /* INIT pending. */
#define CPU_INTERRUPT_SIPI   0x800 /* SIPI pending. */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

void cpu_exit(CPUState *s);

int qemu_cpu_has_work(CPUState *env);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
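
/* Illustrative sketch: a debug stub might plant a breakpoint at a
 * hypothetical guest pc and later drop it by reference:
 *
 *     CPUBreakpoint *bp;
 *     if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
 *         ...
 *         cpu_breakpoint_remove_by_ref(env, bp);
 *     }
 */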

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

/* IO ports API */

/* NOTE: as these functions may even be used when there is an isa
   bridge on non x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* memory API */

extern int phys_ram_fd;
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
extern ram_addr_t last_ram_offset;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available.  */

#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS  - IO_MEM_SHIFT))

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY    (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO        (1 << 5)

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define KQEMU_DIRTY_FLAG     0x04
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
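
/* Illustrative sketch: a display device could test the VGA dirty bit for a
 * page of guest RAM and clear it once the region has been redrawn
 * (addr is a hypothetical ram_addr_t):
 *
 *     if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
 *         ...
 *         cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
 *                                         VGA_DIRTY_FLAG);
 *     }
 */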

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
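
/* Illustrative sketch: a device whose MMIO writes are side-effect free
 * (base and size are hypothetical) could mark the region as coalesced for
 * the lifetime of the mapping:
 *
 *     qemu_register_coalesced_mmio(base, size);
 *     ...
 *     qemu_unregister_coalesced_mmio(base, size);
 */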

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
     */
    __asm__ __volatile__ (
        "mftb    %0\n\t"
        "cmpwi   %0,0\n\t"
        "beq-    $-8"
        : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ (
        "mftbu   %1\n\t"
        "mftb    %L0\n\t"
        "mftbu   %0\n\t"
        "cmpw    %0,%1\n\t"
        "bne     $-16"
        : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low,high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
        int64_t val;
        asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
        return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if     defined(_LP64)
        uint64_t        rval;
        asm volatile("rd %%tick,%0" : "=r"(rval));
        return rval;
#else
        union {
                uint64_t i64;
                struct {
                        uint32_t high;
                        uint32_t low;
                }       i32;
        } rval;
        asm volatile("rd %%tick,%1; srlx %1,32,%0"
                : "=r"(rval.i32.high), "=r"(rval.i32.low));
        return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif

#endif /* CPU_ALL_H */