cpu-all.h @ 151f7749
/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "cpu-common.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "softfloat.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
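
/* Usage sketch (editor's illustration, not part of the original header):
 * tswap32()/tswapl() convert a value between target and host byte order.
 * They compile to a byte swap only when host and target endianness differ
 * (BSWAP_NEEDED) and to a no-op otherwise, so callers can use them
 * unconditionally.  Assuming a hypothetical buffer holding data in target
 * byte order:
 *
 *     uint32_t raw;
 *     memcpy(&raw, target_order_buf, sizeof(raw));  // bytes in target order
 *     uint32_t host_val = tswap32(raw);             // now in host byte order
 *     raw = tswap32(host_val);                      // swap back before storing
 *     memcpy(target_order_buf, &raw, sizeof(raw));
 */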

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: the ARM FPA is horrible: the two 32-bit words of a double are
   stored in big-endian order! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
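
/* Usage sketch (editor's illustration): CPU_DoubleU exposes the two 32-bit
 * halves of a float64 without pointer casts; the struct layout above is
 * chosen so that .l.upper/.l.lower always name the numerically high/low
 * words whatever the host endianness.  Assuming some float64 value d:
 *
 *     CPU_DoubleU u;
 *     u.d = d;
 *     uint32_t hi   = u.l.upper;   // high 32 bits of the representation
 *     uint32_t lo   = u.l.lower;   // low 32 bits
 *     uint64_t bits = u.ll;        // full 64-bit bit pattern
 */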

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
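
/* A few concrete instances of the scheme above (editor's illustration):
 *
 *     ldub_p(p)        load unsigned 8 bit value from host pointer p
 *     ldsw_le_p(p)     load signed 16 bit little endian value
 *     ldl_be_p(p)      load 32 bit big endian value
 *     stfq_be_p(p, v)  store float64 v in big endian byte order
 *     ldl_kernel(p)    load 32 bit value through the soft MMU in kernel mode
 */
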
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system-wide setting: bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a,b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a<<32)|b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
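
/* Editor's note (illustrative): the unsuffixed accessors above always use
 * the *target* byte order, e.g. on a big-endian target ldl_p(p) expands to
 * ldl_be_p(p).  Reading a 32-bit guest value from a host buffer therefore
 * byte-swaps on the host only when the endiannesses differ:
 *
 *     uint32_t v = ldl_p(buf);   // buf is a hypothetical host pointer
 */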

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(__ret == (abi_ulong)__ret); \
    (abi_ulong)__ret; \
})
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest == (abi_ulong)__guest); \
})
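
/* Usage sketch (editor's illustration): g2h()/h2g() translate between guest
 * addresses and the host pointers that back them, offset by GUEST_BASE;
 * h2g() asserts that the result fits in abi_ulong.  Assuming a hypothetical
 * guest address ga:
 *
 *     uint8_t *host_ptr = g2h(ga);          // host pointer for guest address
 *     if (h2g_valid(host_ptr)) {
 *         abi_ulong back = h2g(host_ptr);   // back == ga
 *     }
 */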

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of unsigned long and target_ulong.  */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
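
/* Worked example (editor's illustration): with a hypothetical 4 KiB target
 * page (TARGET_PAGE_BITS == 12), TARGET_PAGE_SIZE is 0x1000 and
 * TARGET_PAGE_MASK is ~0xfff, so:
 *
 *     TARGET_PAGE_ALIGN(0x12345) == 0x13000    // rounded up to a page boundary
 *     (0x12345 & TARGET_PAGE_MASK) == 0x12000  // rounded down
 *
 * HOST_PAGE_ALIGN() does the same rounding with the run-time host page size.
 */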

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED  0x0020

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);

void cpu_exec_init_all(unsigned long tb_size);
CPUState *cpu_copy(CPUState *env);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics (CPUState *env, FILE *f,
                          int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                          int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)));
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int64_t qemu_icount;
extern int use_icount;

#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10 /* Fast interrupt pending.  */
#define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occurred.  */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

void cpu_exit(CPUState *s);

int qemu_cpu_has_work(CPUState *env);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
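
/* Usage sketch (editor's illustration): a debugger stub might plant a
 * breakpoint at a guest pc and a write watchpoint on a 4-byte guest range
 * like this (error handling omitted):
 *
 *     cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 *     cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(env, pc, BP_GDB);
 */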

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not run timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page is found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

    
833
/* IO ports API */
834

    
835
/* NOTE: as these functions may be even used when there is an isa
836
   brige on non x86 targets, we always defined them */
837
#ifndef NO_CPU_IO_DEFS
838
void cpu_outb(CPUState *env, int addr, int val);
839
void cpu_outw(CPUState *env, int addr, int val);
840
void cpu_outl(CPUState *env, int addr, int val);
841
int cpu_inb(CPUState *env, int addr);
842
int cpu_inw(CPUState *env, int addr);
843
int cpu_inl(CPUState *env, int addr);
844
#endif
845

    
846
/* memory API */
847

    
848
extern int phys_ram_fd;
849
extern uint8_t *phys_ram_dirty;
850
extern ram_addr_t ram_size;
851
extern ram_addr_t last_ram_offset;
852

    
853
/* physical memory access */
854

    
855
/* MMIO pages are identified by a combination of an IO device index and
856
   3 flags.  The ROMD code stores the page ram offset in iotlb entry, 
857
   so only a limited number of ids are avaiable.  */

#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS  - IO_MEM_SHIFT))

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY    (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO        (1 << 5)

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define KQEMU_DIRTY_FLAG     0x04
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
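
/* Usage sketch (editor's illustration): phys_ram_dirty keeps one byte of
 * dirty flags per target page, indexed by addr >> TARGET_PAGE_BITS.
 * Checking the VGA dirty flag for one page might look like this
 * (hypothetical helper, not part of the original API):
 *
 *     static int vga_page_is_dirty(ram_addr_t addr)
 *     {
 *         return cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG) != 0;
 *     }
 */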

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
     */
    __asm__ __volatile__ (
        "mftb    %0\n\t"
        "cmpwi   %0,0\n\t"
        "beq-    $-8"
        : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ (
        "mftbu   %1\n\t"
        "mftb    %L0\n\t"
        "mftbu   %0\n\t"
        "cmpw    %0,%1\n\t"
        "bne     $-16"
        : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low,high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
        int64_t val;
        asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
        return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if     defined(_LP64)
        uint64_t        rval;
        asm volatile("rd %%tick,%0" : "=r"(rval));
        return rval;
#else
        union {
                uint64_t i64;
                struct {
                        uint32_t high;
                        uint32_t low;
                }       i32;
        } rval;
        asm volatile("rd %%tick,%1; srlx %1,32,%0"
                : "=r"(rval.i32.high), "=r"(rval.i32.low));
        return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif

#endif /* CPU_ALL_H */