/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#if defined(__arm__) || defined(__sparc__) || defined(__mips__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
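
/* Illustrative note (not from the original source): the tswap* helpers
   convert values between target and host byte order.  They byte swap only
   when BSWAP_NEEDED is defined above, i.e. when host and target endianness
   differ; otherwise they are no-ops.  For example, on a little endian host
   running a big endian target, tswap32(0x11223344) yields 0x44332211, and
   tswapl()/tswapls() select the 32 or 64 bit variant from TARGET_LONG_SIZE. */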

/* NOTE: arm FPA is horrible as the two 32 bit words of a double are
   stored in big endian order! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
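
/* Illustrative note (not from the original source): CPU_DoubleU exposes the
   two 32 bit halves of a float64 with a host-endianness-independent meaning:
       CPU_DoubleU u;
       u.d = some_double;
       ... u.l.upper ...    high 32 bits; u.l.lower is the low 32 bits
   The ldfq_*_p()/stfq_*_p() helpers below (re)assemble doubles this way. */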

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
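
/* Illustrative examples of how these pieces compose (the macros themselves
   are defined later in this header for user mode builds):
       ldub_raw(p)       unsigned 8 bit load, raw host memory access
       ldsw_raw(p)       signed 16 bit load, target endianness, raw access
       stl_kernel(p, v)  32 bit store, kernel mode access through the soft MMU
       stfq_raw(p, v)    64 bit float store, target endianness, raw access */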
static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(void *ptr)
{
    uint32_t a,b;
    a = ldl_be_p(ptr);
    b = ldl_be_p(ptr+4);
    return (((uint64_t)a<<32)|b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p(ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p(ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p(ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
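
/* Illustrative note: the unsuffixed accessors above follow the target byte
   order, e.g. on a big endian target ldl_p(p) expands to ldl_be_p(p), while
   on a little endian target it expands to ldl_le_p(p). */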

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ((target_ulong)(x - GUEST_BASE))
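
/* Illustrative note: with GUEST_BASE == 0 these are plain casts; in general
   g2h() turns a guest address into the host pointer that backs it
   (host = guest + GUEST_BASE) and h2g() is the inverse mapping. */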

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
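
/* Worked example (illustrative, assuming TARGET_PAGE_BITS == 12):
   TARGET_PAGE_SIZE == 0x1000, TARGET_PAGE_MASK == ~0xfff, and
   TARGET_PAGE_ALIGN(0x1234) == 0x2000, i.e. rounded up to the next page. */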

/* ??? These should be the larger of unsigned long and target_ulong.  */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED  0x0020

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);

CPUState *cpu_copy(CPUState *env);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics (CPUState *env, FILE *f,
                          int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                          int flags);

void cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)))
    __attribute__ ((__noreturn__));
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int code_copy_enabled;

#define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10 /* Fast interrupt pending.  */
#define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occurred.  */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

int cpu_watchpoint_insert(CPUState *env, target_ulong addr);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
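
/* Illustrative usage sketch (the valid item names are whatever is registered
   in cpu_log_items[], which is defined elsewhere):
       cpu_set_log_filename("/tmp/qemu.log");            hypothetical path
       cpu_set_log(cpu_str_to_log_mask("in_asm,exec"));
   assuming "in_asm" and "exec" are the registered names for
   CPU_LOG_TB_IN_ASM and CPU_LOG_EXEC. */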

/* IO ports API */

/* NOTE: as these functions may even be used when there is an ISA
   bridge on non-x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* memory API */

extern int phys_ram_size;
extern int phys_ram_fd;
extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;

/* physical memory access */
#define TLB_INVALID_MASK   (1 << 3)
#define IO_MEM_SHIFT       4
#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS  - IO_MEM_SHIFT))

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY    (4 << IO_MEM_SHIFT) /* used internally, never use directly */
/* acts like a ROM when read and like a device when written. As an
   exception, the write memory callback gets the ram offset instead of
   the physical address */
#define IO_MEM_ROMD        (1)
#define IO_MEM_SUBPAGE     (2)

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset);
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc(unsigned int size);
void qemu_ram_free(ram_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
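
/* Illustrative registration sketch (hypothetical device callback names; see
   the callers in exec.c for the authoritative usage):
       static CPUReadMemoryFunc *mydev_read[3];      byte/word/long handlers
       static CPUWriteMemoryFunc *mydev_write[3];
       int io;
       io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
       cpu_register_physical_memory(base_addr, 0x1000, io);
   Passing 0 as io_index requests a fresh entry; the returned index is then
   used as the phys_offset of the registered region. */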

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG  0x01
#define CODE_DIRTY_FLAG 0x02
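
/* Illustrative note: phys_ram_dirty keeps one byte of dirty flags per target
   page.  0xff means dirty for every client; individual bits such as
   VGA_DIRTY_FLAG and CODE_DIRTY_FLAG can be tested or reset separately, as
   the helpers below do. */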

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/*******************************************/
/* host CPU ticks (if available) */

#if defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    asm volatile("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
        uint32_t tbl;
        asm volatile("mftbu %0" : "=r" (tbl));
        return tbl;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low,high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
        int64_t val;
        asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
        return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if     defined(_LP64)
        uint64_t        rval;
        asm volatile("rd %%tick,%0" : "=r"(rval));
        return rval;
#else
        union {
                uint64_t i64;
                struct {
                        uint32_t high;
                        uint32_t low;
                }       i32;
        } rval;
        asm volatile("rd %%tick,%1; srlx %1,32,%0"
                : "=r"(rval.i32.high), "=r"(rval.i32.low));
        return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;

#endif

#endif /* CPU_ALL_H */