target-sparc / op_helper.c @ 4d2c2b77
1
#include "exec.h"
2
#include "host-utils.h"
3
#include "helper.h"
4
#include "sysemu.h"
5

    
6
//#define DEBUG_MMU
7
//#define DEBUG_MXCC
8
//#define DEBUG_UNALIGNED
9
//#define DEBUG_UNASSIGNED
10
//#define DEBUG_ASI
11
//#define DEBUG_PCALL
12
//#define DEBUG_PSTATE
13
//#define DEBUG_CACHE_CONTROL
14

    
15
#ifdef DEBUG_MMU
16
#define DPRINTF_MMU(fmt, ...)                                   \
17
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
18
#else
19
#define DPRINTF_MMU(fmt, ...) do {} while (0)
20
#endif
21

    
22
#ifdef DEBUG_MXCC
23
#define DPRINTF_MXCC(fmt, ...)                                  \
24
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
25
#else
26
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
27
#endif
28

    
29
#ifdef DEBUG_ASI
30
#define DPRINTF_ASI(fmt, ...)                                   \
31
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
32
#endif
33

    
34
#ifdef DEBUG_PSTATE
35
#define DPRINTF_PSTATE(fmt, ...)                                   \
36
    do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
37
#else
38
#define DPRINTF_PSTATE(fmt, ...) do {} while (0)
39
#endif
40

    
41
#ifdef DEBUG_CACHE_CONTROL
42
#define DPRINTF_CACHE_CONTROL(fmt, ...)                                   \
43
    do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
44
#else
45
#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
46
#endif
47

    
48
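/* Address masking: on sparc64 with PSTATE.AM set (and always for the 32-bit
   user-mode ABI) only the low 32 bits of a virtual address are used. */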
#ifdef TARGET_SPARC64
49
#ifndef TARGET_ABI32
50
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
51
#else
52
#define AM_CHECK(env1) (1)
53
#endif
54
#endif
55

    
56
#define DT0 (env->dt0)
57
#define DT1 (env->dt1)
58
#define QT0 (env->qt0)
59
#define QT1 (env->qt1)
60

    
61
/* Leon3 cache control */
62

    
63
/* Cache control: emulate the behavior of cache control registers but without
64
   any effect on the emulated CPU */
65

    
66
#define CACHE_STATE_MASK 0x3
67
#define CACHE_DISABLED   0x0
68
#define CACHE_FROZEN     0x1
69
#define CACHE_ENABLED    0x3
70

    
71
/* Cache Control register fields */
72

    
73
#define CACHE_CTRL_IF (1 <<  4)  /* Instruction Cache Freeze on Interrupt */
74
#define CACHE_CTRL_DF (1 <<  5)  /* Data Cache Freeze on Interrupt */
75
#define CACHE_CTRL_DP (1 << 14)  /* Data cache flush pending */
76
#define CACHE_CTRL_IP (1 << 15)  /* Instruction cache flush pending */
77
#define CACHE_CTRL_IB (1 << 16)  /* Instruction burst fetch */
78
#define CACHE_CTRL_FI (1 << 21)  /* Flush Instruction cache (Write only) */
79
#define CACHE_CTRL_FD (1 << 22)  /* Flush Data cache (Write only) */
80
#define CACHE_CTRL_DS (1 << 23)  /* Data cache snoop enable */
81

    
82
#if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
83
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
84
                          int is_asi, int size);
85
#endif
86

    
87
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
88
// Calculates TSB pointer value for fault page size 8k or 64k
89
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
90
                                       uint64_t tag_access_register,
91
                                       int page_size)
92
{
93
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
94
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
95
    int tsb_size  = tsb_register & 0xf;
96

    
97
    // discard lower 13 bits which hold tag access context
98
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
99

    
100
    // now reorder bits
101
    uint64_t tsb_base_mask = ~0x1fffULL;
102
    uint64_t va = tag_access_va;
103

    
104
    // move va bits to correct position
105
    if (page_size == 8*1024) {
106
        va >>= 9;
107
    } else if (page_size == 64*1024) {
108
        va >>= 12;
109
    }
110

    
111
    if (tsb_size) {
112
        tsb_base_mask <<= tsb_size;
113
    }
114

    
115
    // calculate tsb_base mask and adjust va if split is in use
116
    if (tsb_split) {
117
        if (page_size == 8*1024) {
118
            va &= ~(1ULL << (13 + tsb_size));
119
        } else if (page_size == 64*1024) {
120
            va |= (1ULL << (13 + tsb_size));
121
        }
122
        tsb_base_mask <<= 1;
123
    }
124

    
125
    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
126
}
127

    
128
// Calculates tag target register value by reordering bits
129
// in tag access register
130
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
131
{
132
    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
133
}
134

    
135
static void replace_tlb_entry(SparcTLBEntry *tlb,
136
                              uint64_t tlb_tag, uint64_t tlb_tte,
137
                              CPUState *env1)
138
{
139
    target_ulong mask, size, va, offset;
140

    
141
    // flush page range if translation is valid
142
    if (TTE_IS_VALID(tlb->tte)) {
143

    
144
        mask = 0xffffffffffffe000ULL;
145
        mask <<= 3 * ((tlb->tte >> 61) & 3);
146
        size = ~mask + 1;
147

    
148
        va = tlb->tag & mask;
149

    
150
        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
151
            tlb_flush_page(env1, va + offset);
152
        }
153
    }
154

    
155
    tlb->tag = tlb_tag;
156
    tlb->tte = tlb_tte;
157
}
158

    
159
static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
160
                      const char* strmmu, CPUState *env1)
161
{
162
    unsigned int i;
163
    target_ulong mask;
164
    uint64_t context;
165

    
166
    int is_demap_context = (demap_addr >> 6) & 1;
167

    
168
    // demap context
169
    switch ((demap_addr >> 4) & 3) {
170
    case 0: // primary
171
        context = env1->dmmu.mmu_primary_context;
172
        break;
173
    case 1: // secondary
174
        context = env1->dmmu.mmu_secondary_context;
175
        break;
176
    case 2: // nucleus
177
        context = 0;
178
        break;
179
    case 3: // reserved
180
    default:
181
        return;
182
    }
183

    
184
    for (i = 0; i < 64; i++) {
185
        if (TTE_IS_VALID(tlb[i].tte)) {
186

    
187
            if (is_demap_context) {
188
                // will remove non-global entries matching context value
189
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
190
                    !tlb_compare_context(&tlb[i], context)) {
191
                    continue;
192
                }
193
            } else {
194
                // demap page
195
                // will remove any entry matching VA
196
                mask = 0xffffffffffffe000ULL;
197
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);
198

    
199
                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
200
                    continue;
201
                }
202

    
203
                // entry should be global or matching context value
204
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
205
                    !tlb_compare_context(&tlb[i], context)) {
206
                    continue;
207
                }
208
            }
209

    
210
            replace_tlb_entry(&tlb[i], 0, 0, env1);
211
#ifdef DEBUG_MMU
212
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
213
            dump_mmu(stdout, fprintf, env1);
214
#endif
215
        }
216
    }
217
}
218

    
219
static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
220
                                 uint64_t tlb_tag, uint64_t tlb_tte,
221
                                 const char* strmmu, CPUState *env1)
222
{
223
    unsigned int i, replace_used;
224

    
225
    // Try replacing invalid entry
226
    for (i = 0; i < 64; i++) {
227
        if (!TTE_IS_VALID(tlb[i].tte)) {
228
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
229
#ifdef DEBUG_MMU
230
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
231
            dump_mmu(stdout, fprintf, env1);
232
#endif
233
            return;
234
        }
235
    }
236

    
237
    // All entries are valid, try replacing unlocked entry
238

    
239
    for (replace_used = 0; replace_used < 2; ++replace_used) {
240

    
241
        // Used entries are not replaced on first pass
242

    
243
        for (i = 0; i < 64; i++) {
244
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
245

    
246
                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
247
#ifdef DEBUG_MMU
248
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
249
                            strmmu, (replace_used?"used":"unused"), i);
250
                dump_mmu(stdout, fprintf, env1);
251
#endif
252
                return;
253
            }
254
        }
255

    
256
        // Now reset used bit and search for unused entries again
257

    
258
        for (i = 0; i < 64; i++) {
259
            TTE_SET_UNUSED(tlb[i].tte);
260
        }
261
    }
262

    
263
#ifdef DEBUG_MMU
264
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
265
#endif
266
    // error state?
267
}
268

    
269
#endif
270

    
271
static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
272
{
273
#ifdef TARGET_SPARC64
274
    if (AM_CHECK(env1))
275
        addr &= 0xffffffffULL;
276
#endif
277
    return addr;
278
}
279

    
280
/* returns true if an access using this ASI has its address translated by the MMU,
281
   otherwise the access goes to a raw physical address */
282
static inline int is_translating_asi(int asi)
283
{
284
#ifdef TARGET_SPARC64
285
    /* Ultrasparc IIi translating asi
286
       - note this list is defined by cpu implementation
287
     */
288
    switch (asi) {
289
    case 0x04 ... 0x11:
290
    case 0x18 ... 0x19:
291
    case 0x24 ... 0x2C:
292
    case 0x70 ... 0x73:
293
    case 0x78 ... 0x79:
294
    case 0x80 ... 0xFF:
295
        return 1;
296

    
297
    default:
298
        return 0;
299
    }
300
#else
301
    /* TODO: check sparc32 bits */
302
    return 0;
303
#endif
304
}
305

    
306
static inline target_ulong asi_address_mask(CPUState *env1,
307
                                            int asi, target_ulong addr)
308
{
309
    if (is_translating_asi(asi)) {
310
        return address_mask(env, addr);
311
    } else {
312
        return addr;
313
    }
314
}
315

    
316
static void raise_exception(int tt)
317
{
318
    env->exception_index = tt;
319
    cpu_loop_exit();
320
}
321

    
322
void HELPER(raise_exception)(int tt)
323
{
324
    raise_exception(tt);
325
}
326

    
327
void helper_shutdown(void)
328
{
329
#if !defined(CONFIG_USER_ONLY)
330
    qemu_system_shutdown_request();
331
#endif
332
}
333

    
334
void helper_check_align(target_ulong addr, uint32_t align)
335
{
336
    if (addr & align) {
337
#ifdef DEBUG_UNALIGNED
338
        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
339
               "\n", addr, env->pc);
340
#endif
341
        raise_exception(TT_UNALIGNED);
342
    }
343
}
344

    
345
#define F_HELPER(name, p) void helper_f##name##p(void)
346

    
347
#define F_BINOP(name)                                           \
348
    float32 helper_f ## name ## s (float32 src1, float32 src2)  \
349
    {                                                           \
350
        return float32_ ## name (src1, src2, &env->fp_status);  \
351
    }                                                           \
352
    F_HELPER(name, d)                                           \
353
    {                                                           \
354
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);     \
355
    }                                                           \
356
    F_HELPER(name, q)                                           \
357
    {                                                           \
358
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);    \
359
    }
360

    
361
F_BINOP(add);
362
F_BINOP(sub);
363
F_BINOP(mul);
364
F_BINOP(div);
365
#undef F_BINOP
366

    
367
void helper_fsmuld(float32 src1, float32 src2)
368
{
369
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
370
                      float32_to_float64(src2, &env->fp_status),
371
                      &env->fp_status);
372
}
373

    
374
void helper_fdmulq(void)
375
{
376
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
377
                       float64_to_float128(DT1, &env->fp_status),
378
                       &env->fp_status);
379
}
380

    
381
float32 helper_fnegs(float32 src)
382
{
383
    return float32_chs(src);
384
}
385

    
386
#ifdef TARGET_SPARC64
387
F_HELPER(neg, d)
388
{
389
    DT0 = float64_chs(DT1);
390
}
391

    
392
F_HELPER(neg, q)
393
{
394
    QT0 = float128_chs(QT1);
395
}
396
#endif
397

    
398
/* Integer to float conversion.  */
399
float32 helper_fitos(int32_t src)
400
{
401
    return int32_to_float32(src, &env->fp_status);
402
}
403

    
404
void helper_fitod(int32_t src)
405
{
406
    DT0 = int32_to_float64(src, &env->fp_status);
407
}
408

    
409
void helper_fitoq(int32_t src)
410
{
411
    QT0 = int32_to_float128(src, &env->fp_status);
412
}
413

    
414
#ifdef TARGET_SPARC64
415
float32 helper_fxtos(void)
416
{
417
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
418
}
419

    
420
F_HELPER(xto, d)
421
{
422
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
423
}
424

    
425
F_HELPER(xto, q)
426
{
427
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
428
}
429
#endif
430
#undef F_HELPER
431

    
432
/* floating point conversion */
433
float32 helper_fdtos(void)
434
{
435
    return float64_to_float32(DT1, &env->fp_status);
436
}
437

    
438
void helper_fstod(float32 src)
439
{
440
    DT0 = float32_to_float64(src, &env->fp_status);
441
}
442

    
443
float32 helper_fqtos(void)
444
{
445
    return float128_to_float32(QT1, &env->fp_status);
446
}
447

    
448
void helper_fstoq(float32 src)
449
{
450
    QT0 = float32_to_float128(src, &env->fp_status);
451
}
452

    
453
void helper_fqtod(void)
454
{
455
    DT0 = float128_to_float64(QT1, &env->fp_status);
456
}
457

    
458
void helper_fdtoq(void)
459
{
460
    QT0 = float64_to_float128(DT1, &env->fp_status);
461
}
462

    
463
/* Float to integer conversion.  */
464
int32_t helper_fstoi(float32 src)
465
{
466
    return float32_to_int32_round_to_zero(src, &env->fp_status);
467
}
468

    
469
int32_t helper_fdtoi(void)
470
{
471
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
472
}
473

    
474
int32_t helper_fqtoi(void)
475
{
476
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
477
}
478

    
479
#ifdef TARGET_SPARC64
480
void helper_fstox(float32 src)
481
{
482
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
483
}
484

    
485
void helper_fdtox(void)
486
{
487
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
488
}
489

    
490
void helper_fqtox(void)
491
{
492
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
493
}
494

    
495
void helper_faligndata(void)
496
{
497
    uint64_t tmp;
498

    
499
    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
500
    /* on many architectures a shift of 64 does nothing */
501
    if ((env->gsr & 7) != 0) {
502
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
503
    }
504
    *((uint64_t *)&DT0) = tmp;
505
}
506

    
507
#ifdef HOST_WORDS_BIGENDIAN
508
#define VIS_B64(n) b[7 - (n)]
509
#define VIS_W64(n) w[3 - (n)]
510
#define VIS_SW64(n) sw[3 - (n)]
511
#define VIS_L64(n) l[1 - (n)]
512
#define VIS_B32(n) b[3 - (n)]
513
#define VIS_W32(n) w[1 - (n)]
514
#else
515
#define VIS_B64(n) b[n]
516
#define VIS_W64(n) w[n]
517
#define VIS_SW64(n) sw[n]
518
#define VIS_L64(n) l[n]
519
#define VIS_B32(n) b[n]
520
#define VIS_W32(n) w[n]
521
#endif
522

    
523
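/* Lane views of 64-bit and 32-bit VIS operands; the VIS_* macros above give
   endian-independent lane indexing. */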
typedef union {
524
    uint8_t b[8];
525
    uint16_t w[4];
526
    int16_t sw[4];
527
    uint32_t l[2];
528
    float64 d;
529
} vis64;
530

    
531
typedef union {
532
    uint8_t b[4];
533
    uint16_t w[2];
534
    uint32_t l;
535
    float32 f;
536
} vis32;
537

    
538
void helper_fpmerge(void)
539
{
540
    vis64 s, d;
541

    
542
    s.d = DT0;
543
    d.d = DT1;
544

    
545
    // Reverse calculation order to handle overlap
546
    d.VIS_B64(7) = s.VIS_B64(3);
547
    d.VIS_B64(6) = d.VIS_B64(3);
548
    d.VIS_B64(5) = s.VIS_B64(2);
549
    d.VIS_B64(4) = d.VIS_B64(2);
550
    d.VIS_B64(3) = s.VIS_B64(1);
551
    d.VIS_B64(2) = d.VIS_B64(1);
552
    d.VIS_B64(1) = s.VIS_B64(0);
553
    //d.VIS_B64(0) = d.VIS_B64(0);
554

    
555
    DT0 = d.d;
556
}
557

    
558
void helper_fmul8x16(void)
559
{
560
    vis64 s, d;
561
    uint32_t tmp;
562

    
563
    s.d = DT0;
564
    d.d = DT1;
565

    
566
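/* Per-lane 8x16 multiply: the 24-bit product is rounded and its upper
   16 bits kept. */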
#define PMUL(r)                                                 \
567
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
568
    if ((tmp & 0xff) > 0x7f)                                    \
569
        tmp += 0x100;                                           \
570
    d.VIS_W64(r) = tmp >> 8;
571

    
572
    PMUL(0);
573
    PMUL(1);
574
    PMUL(2);
575
    PMUL(3);
576
#undef PMUL
577

    
578
    DT0 = d.d;
579
}
580

    
581
void helper_fmul8x16al(void)
582
{
583
    vis64 s, d;
584
    uint32_t tmp;
585

    
586
    s.d = DT0;
587
    d.d = DT1;
588

    
589
#define PMUL(r)                                                 \
590
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
591
    if ((tmp & 0xff) > 0x7f)                                    \
592
        tmp += 0x100;                                           \
593
    d.VIS_W64(r) = tmp >> 8;
594

    
595
    PMUL(0);
596
    PMUL(1);
597
    PMUL(2);
598
    PMUL(3);
599
#undef PMUL
600

    
601
    DT0 = d.d;
602
}
603

    
604
void helper_fmul8x16au(void)
605
{
606
    vis64 s, d;
607
    uint32_t tmp;
608

    
609
    s.d = DT0;
610
    d.d = DT1;
611

    
612
#define PMUL(r)                                                 \
613
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
614
    if ((tmp & 0xff) > 0x7f)                                    \
615
        tmp += 0x100;                                           \
616
    d.VIS_W64(r) = tmp >> 8;
617

    
618
    PMUL(0);
619
    PMUL(1);
620
    PMUL(2);
621
    PMUL(3);
622
#undef PMUL
623

    
624
    DT0 = d.d;
625
}
626

    
627
void helper_fmul8sux16(void)
628
{
629
    vis64 s, d;
630
    uint32_t tmp;
631

    
632
    s.d = DT0;
633
    d.d = DT1;
634

    
635
#define PMUL(r)                                                         \
636
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
637
    if ((tmp & 0xff) > 0x7f)                                            \
638
        tmp += 0x100;                                                   \
639
    d.VIS_W64(r) = tmp >> 8;
640

    
641
    PMUL(0);
642
    PMUL(1);
643
    PMUL(2);
644
    PMUL(3);
645
#undef PMUL
646

    
647
    DT0 = d.d;
648
}
649

    
650
void helper_fmul8ulx16(void)
651
{
652
    vis64 s, d;
653
    uint32_t tmp;
654

    
655
    s.d = DT0;
656
    d.d = DT1;
657

    
658
#define PMUL(r)                                                         \
659
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
660
    if ((tmp & 0xff) > 0x7f)                                            \
661
        tmp += 0x100;                                                   \
662
    d.VIS_W64(r) = tmp >> 8;
663

    
664
    PMUL(0);
665
    PMUL(1);
666
    PMUL(2);
667
    PMUL(3);
668
#undef PMUL
669

    
670
    DT0 = d.d;
671
}
672

    
673
void helper_fmuld8sux16(void)
674
{
675
    vis64 s, d;
676
    uint32_t tmp;
677

    
678
    s.d = DT0;
679
    d.d = DT1;
680

    
681
#define PMUL(r)                                                         \
682
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
683
    if ((tmp & 0xff) > 0x7f)                                            \
684
        tmp += 0x100;                                                   \
685
    d.VIS_L64(r) = tmp;
686

    
687
    // Reverse calculation order to handle overlap
688
    PMUL(1);
689
    PMUL(0);
690
#undef PMUL
691

    
692
    DT0 = d.d;
693
}
694

    
695
void helper_fmuld8ulx16(void)
696
{
697
    vis64 s, d;
698
    uint32_t tmp;
699

    
700
    s.d = DT0;
701
    d.d = DT1;
702

    
703
#define PMUL(r)                                                         \
704
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
705
    if ((tmp & 0xff) > 0x7f)                                            \
706
        tmp += 0x100;                                                   \
707
    d.VIS_L64(r) = tmp;
708

    
709
    // Reverse calculation order to handle overlap
710
    PMUL(1);
711
    PMUL(0);
712
#undef PMUL
713

    
714
    DT0 = d.d;
715
}
716

    
717
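/* VIS FEXPAND: convert four unsigned 8-bit values to four 16-bit fixed-point
   values by shifting each byte left by 4. */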
void helper_fexpand(void)
718
{
719
    vis32 s;
720
    vis64 d;
721

    
722
    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
723
    d.d = DT1;
724
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
725
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
726
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
727
    d.VIS_W64(3) = s.VIS_B32(3) << 4;
728

    
729
    DT0 = d.d;
730
}
731

    
732
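/* Generate the partitioned add/subtract helpers (FPADD/FPSUB) for 16-bit and
   32-bit lanes, in both 64-bit and 32-bit operand forms. */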
#define VIS_HELPER(name, F)                             \
733
    void name##16(void)                                 \
734
    {                                                   \
735
        vis64 s, d;                                     \
736
                                                        \
737
        s.d = DT0;                                      \
738
        d.d = DT1;                                      \
739
                                                        \
740
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));   \
741
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));   \
742
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));   \
743
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));   \
744
                                                        \
745
        DT0 = d.d;                                      \
746
    }                                                   \
747
                                                        \
748
    uint32_t name##16s(uint32_t src1, uint32_t src2)    \
749
    {                                                   \
750
        vis32 s, d;                                     \
751
                                                        \
752
        s.l = src1;                                     \
753
        d.l = src2;                                     \
754
                                                        \
755
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));   \
756
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));   \
757
                                                        \
758
        return d.l;                                     \
759
    }                                                   \
760
                                                        \
761
    void name##32(void)                                 \
762
    {                                                   \
763
        vis64 s, d;                                     \
764
                                                        \
765
        s.d = DT0;                                      \
766
        d.d = DT1;                                      \
767
                                                        \
768
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));   \
769
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));   \
770
                                                        \
771
        DT0 = d.d;                                      \
772
    }                                                   \
773
                                                        \
774
    uint32_t name##32s(uint32_t src1, uint32_t src2)    \
775
    {                                                   \
776
        vis32 s, d;                                     \
777
                                                        \
778
        s.l = src1;                                     \
779
        d.l = src2;                                     \
780
                                                        \
781
        d.l = F(d.l, s.l);                              \
782
                                                        \
783
        return d.l;                                     \
784
    }
785

    
786
#define FADD(a, b) ((a) + (b))
787
#define FSUB(a, b) ((a) - (b))
788
VIS_HELPER(helper_fpadd, FADD)
789
VIS_HELPER(helper_fpsub, FSUB)
790

    
791
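/* Generate the partitioned compare helpers: each lane comparison contributes
   one bit to the result mask. */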
#define VIS_CMPHELPER(name, F)                                        \
792
    void name##16(void)                                           \
793
    {                                                             \
794
        vis64 s, d;                                               \
795
                                                                  \
796
        s.d = DT0;                                                \
797
        d.d = DT1;                                                \
798
                                                                  \
799
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0;       \
800
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0;      \
801
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0;      \
802
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0;      \
803
                                                                  \
804
        DT0 = d.d;                                                \
805
    }                                                             \
806
                                                                  \
807
    void name##32(void)                                           \
808
    {                                                             \
809
        vis64 s, d;                                               \
810
                                                                  \
811
        s.d = DT0;                                                \
812
        d.d = DT1;                                                \
813
                                                                  \
814
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0;       \
815
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0;      \
816
                                                                  \
817
        DT0 = d.d;                                                \
818
    }
819

    
820
#define FCMPGT(a, b) ((a) > (b))
821
#define FCMPEQ(a, b) ((a) == (b))
822
#define FCMPLE(a, b) ((a) <= (b))
823
#define FCMPNE(a, b) ((a) != (b))
824

    
825
VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
826
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
827
VIS_CMPHELPER(helper_fcmple, FCMPLE)
828
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
829
#endif
830

    
831
void helper_check_ieee_exceptions(void)
832
{
833
    target_ulong status;
834

    
835
    status = get_float_exception_flags(&env->fp_status);
836
    if (status) {
837
        /* Copy IEEE 754 flags into FSR */
838
        if (status & float_flag_invalid)
839
            env->fsr |= FSR_NVC;
840
        if (status & float_flag_overflow)
841
            env->fsr |= FSR_OFC;
842
        if (status & float_flag_underflow)
843
            env->fsr |= FSR_UFC;
844
        if (status & float_flag_divbyzero)
845
            env->fsr |= FSR_DZC;
846
        if (status & float_flag_inexact)
847
            env->fsr |= FSR_NXC;
848

    
849
        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
850
            /* Unmasked exception, generate a trap */
851
            env->fsr |= FSR_FTT_IEEE_EXCP;
852
            raise_exception(TT_FP_EXCP);
853
        } else {
854
            /* Accumulate exceptions */
855
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
856
        }
857
    }
858
}
859

    
860
void helper_clear_float_exceptions(void)
861
{
862
    set_float_exception_flags(0, &env->fp_status);
863
}
864

    
865
float32 helper_fabss(float32 src)
866
{
867
    return float32_abs(src);
868
}
869

    
870
#ifdef TARGET_SPARC64
871
void helper_fabsd(void)
872
{
873
    DT0 = float64_abs(DT1);
874
}
875

    
876
void helper_fabsq(void)
877
{
878
    QT0 = float128_abs(QT1);
879
}
880
#endif
881

    
882
float32 helper_fsqrts(float32 src)
883
{
884
    return float32_sqrt(src, &env->fp_status);
885
}
886

    
887
void helper_fsqrtd(void)
888
{
889
    DT0 = float64_sqrt(DT1, &env->fp_status);
890
}
891

    
892
void helper_fsqrtq(void)
893
{
894
    QT0 = float128_sqrt(QT1, &env->fp_status);
895
}
896

    
897
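/* Generate the floating-point compare helpers.  FS is the bit position of the
   FCC field in the FSR that receives the result; E selects the fcmpe*
   variants, which check for NaN operands up front and trap when the NV trap
   enable bit is set. */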
#define GEN_FCMP(name, size, reg1, reg2, FS, E)                         \
898
    void glue(helper_, name) (void)                                     \
899
    {                                                                   \
900
        env->fsr &= FSR_FTT_NMASK;                                      \
901
        if (E && (glue(size, _is_any_nan)(reg1) ||                      \
902
                     glue(size, _is_any_nan)(reg2)) &&                  \
903
            (env->fsr & FSR_NVM)) {                                     \
904
            env->fsr |= FSR_NVC;                                        \
905
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
906
            raise_exception(TT_FP_EXCP);                                \
907
        }                                                               \
908
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) {   \
909
        case float_relation_unordered:                                  \
910
            if ((env->fsr & FSR_NVM)) {                                 \
911
                env->fsr |= FSR_NVC;                                    \
912
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
913
                raise_exception(TT_FP_EXCP);                            \
914
            } else {                                                    \
915
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
916
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
917
                env->fsr |= FSR_NVA;                                    \
918
            }                                                           \
919
            break;                                                      \
920
        case float_relation_less:                                       \
921
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
922
            env->fsr |= FSR_FCC0 << FS;                                 \
923
            break;                                                      \
924
        case float_relation_greater:                                    \
925
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
926
            env->fsr |= FSR_FCC1 << FS;                                 \
927
            break;                                                      \
928
        default:                                                        \
929
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
930
            break;                                                      \
931
        }                                                               \
932
    }
933
#define GEN_FCMPS(name, size, FS, E)                                    \
934
    void glue(helper_, name)(float32 src1, float32 src2)                \
935
    {                                                                   \
936
        env->fsr &= FSR_FTT_NMASK;                                      \
937
        if (E && (glue(size, _is_any_nan)(src1) ||                      \
938
                     glue(size, _is_any_nan)(src2)) &&                  \
939
            (env->fsr & FSR_NVM)) {                                     \
940
            env->fsr |= FSR_NVC;                                        \
941
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
942
            raise_exception(TT_FP_EXCP);                                \
943
        }                                                               \
944
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) {   \
945
        case float_relation_unordered:                                  \
946
            if ((env->fsr & FSR_NVM)) {                                 \
947
                env->fsr |= FSR_NVC;                                    \
948
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
949
                raise_exception(TT_FP_EXCP);                            \
950
            } else {                                                    \
951
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
952
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
953
                env->fsr |= FSR_NVA;                                    \
954
            }                                                           \
955
            break;                                                      \
956
        case float_relation_less:                                       \
957
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
958
            env->fsr |= FSR_FCC0 << FS;                                 \
959
            break;                                                      \
960
        case float_relation_greater:                                    \
961
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
962
            env->fsr |= FSR_FCC1 << FS;                                 \
963
            break;                                                      \
964
        default:                                                        \
965
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
966
            break;                                                      \
967
        }                                                               \
968
    }
969

    
970
GEN_FCMPS(fcmps, float32, 0, 0);
971
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
972

    
973
GEN_FCMPS(fcmpes, float32, 0, 1);
974
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
975

    
976
GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
977
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
978

    
979
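/* Lazy condition-code evaluation: CC_OP records which operation last set the
   flags and CC_DST/CC_SRC/CC_SRC2 hold its result and operands; the
   compute_* functions below rebuild the PSR (and XCC) flag bits on demand. */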
static uint32_t compute_all_flags(void)
980
{
981
    return env->psr & PSR_ICC;
982
}
983

    
984
static uint32_t compute_C_flags(void)
985
{
986
    return env->psr & PSR_CARRY;
987
}
988

    
989
static inline uint32_t get_NZ_icc(int32_t dst)
990
{
991
    uint32_t ret = 0;
992

    
993
    if (dst == 0) {
994
        ret = PSR_ZERO;
995
    } else if (dst < 0) {
996
        ret = PSR_NEG;
997
    }
998
    return ret;
999
}
1000

    
1001
#ifdef TARGET_SPARC64
1002
static uint32_t compute_all_flags_xcc(void)
1003
{
1004
    return env->xcc & PSR_ICC;
1005
}
1006

    
1007
static uint32_t compute_C_flags_xcc(void)
1008
{
1009
    return env->xcc & PSR_CARRY;
1010
}
1011

    
1012
static inline uint32_t get_NZ_xcc(target_long dst)
1013
{
1014
    uint32_t ret = 0;
1015

    
1016
    if (!dst) {
1017
        ret = PSR_ZERO;
1018
    } else if (dst < 0) {
1019
        ret = PSR_NEG;
1020
    }
1021
    return ret;
1022
}
1023
#endif
1024

    
1025
static inline uint32_t get_V_div_icc(target_ulong src2)
1026
{
1027
    uint32_t ret = 0;
1028

    
1029
    if (src2 != 0) {
1030
        ret = PSR_OVF;
1031
    }
1032
    return ret;
1033
}
1034

    
1035
static uint32_t compute_all_div(void)
1036
{
1037
    uint32_t ret;
1038

    
1039
    ret = get_NZ_icc(CC_DST);
1040
    ret |= get_V_div_icc(CC_SRC2);
1041
    return ret;
1042
}
1043

    
1044
static uint32_t compute_C_div(void)
1045
{
1046
    return 0;
1047
}
1048

    
1049
static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
1050
{
1051
    uint32_t ret = 0;
1052

    
1053
    if (dst < src1) {
1054
        ret = PSR_CARRY;
1055
    }
1056
    return ret;
1057
}
1058

    
1059
static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
1060
                                      uint32_t src2)
1061
{
1062
    uint32_t ret = 0;
1063

    
1064
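    /* carry out of bit 31, reconstructed from the operands and the result */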
    if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
1065
        ret = PSR_CARRY;
1066
    }
1067
    return ret;
1068
}
1069

    
1070
static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
1071
                                     uint32_t src2)
1072
{
1073
    uint32_t ret = 0;
1074

    
1075
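    /* overflow: the operands have the same sign and the result's sign differs */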
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
1076
        ret = PSR_OVF;
1077
    }
1078
    return ret;
1079
}
1080

    
1081
#ifdef TARGET_SPARC64
1082
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
1083
{
1084
    uint32_t ret = 0;
1085

    
1086
    if (dst < src1) {
1087
        ret = PSR_CARRY;
1088
    }
1089
    return ret;
1090
}
1091

    
1092
static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
1093
                                      target_ulong src2)
1094
{
1095
    uint32_t ret = 0;
1096

    
1097
    if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
1098
        ret = PSR_CARRY;
1099
    }
1100
    return ret;
1101
}
1102

    
1103
static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
1104
                                         target_ulong src2)
1105
{
1106
    uint32_t ret = 0;
1107

    
1108
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
1109
        ret = PSR_OVF;
1110
    }
1111
    return ret;
1112
}
1113

    
1114
static uint32_t compute_all_add_xcc(void)
1115
{
1116
    uint32_t ret;
1117

    
1118
    ret = get_NZ_xcc(CC_DST);
1119
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
1120
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
1121
    return ret;
1122
}
1123

    
1124
static uint32_t compute_C_add_xcc(void)
1125
{
1126
    return get_C_add_xcc(CC_DST, CC_SRC);
1127
}
1128
#endif
1129

    
1130
static uint32_t compute_all_add(void)
1131
{
1132
    uint32_t ret;
1133

    
1134
    ret = get_NZ_icc(CC_DST);
1135
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1136
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1137
    return ret;
1138
}
1139

    
1140
static uint32_t compute_C_add(void)
1141
{
1142
    return get_C_add_icc(CC_DST, CC_SRC);
1143
}
1144

    
1145
#ifdef TARGET_SPARC64
1146
static uint32_t compute_all_addx_xcc(void)
1147
{
1148
    uint32_t ret;
1149

    
1150
    ret = get_NZ_xcc(CC_DST);
1151
    ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
1152
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
1153
    return ret;
1154
}
1155

    
1156
static uint32_t compute_C_addx_xcc(void)
1157
{
1158
    uint32_t ret;
1159

    
1160
    ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
1161
    return ret;
1162
}
1163
#endif
1164

    
1165
static uint32_t compute_all_addx(void)
1166
{
1167
    uint32_t ret;
1168

    
1169
    ret = get_NZ_icc(CC_DST);
1170
    ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1171
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1172
    return ret;
1173
}
1174

    
1175
static uint32_t compute_C_addx(void)
1176
{
1177
    uint32_t ret;
1178

    
1179
    ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1180
    return ret;
1181
}
1182

    
1183
static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
1184
{
1185
    uint32_t ret = 0;
1186

    
1187
    if ((src1 | src2) & 0x3) {
1188
        ret = PSR_OVF;
1189
    }
1190
    return ret;
1191
}
1192

    
1193
static uint32_t compute_all_tadd(void)
1194
{
1195
    uint32_t ret;
1196

    
1197
    ret = get_NZ_icc(CC_DST);
1198
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1199
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1200
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1201
    return ret;
1202
}
1203

    
1204
static uint32_t compute_all_taddtv(void)
1205
{
1206
    uint32_t ret;
1207

    
1208
    ret = get_NZ_icc(CC_DST);
1209
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1210
    return ret;
1211
}
1212

    
1213
static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
1214
{
1215
    uint32_t ret = 0;
1216

    
1217
    if (src1 < src2) {
1218
        ret = PSR_CARRY;
1219
    }
1220
    return ret;
1221
}
1222

    
1223
static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
1224
                                      uint32_t src2)
1225
{
1226
    uint32_t ret = 0;
1227

    
1228
    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
1229
        ret = PSR_CARRY;
1230
    }
1231
    return ret;
1232
}
1233

    
1234
static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
1235
                                     uint32_t src2)
1236
{
1237
    uint32_t ret = 0;
1238

    
1239
    if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
1240
        ret = PSR_OVF;
1241
    }
1242
    return ret;
1243
}
1244

    
1245

    
1246
#ifdef TARGET_SPARC64
1247
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
1248
{
1249
    uint32_t ret = 0;
1250

    
1251
    if (src1 < src2) {
1252
        ret = PSR_CARRY;
1253
    }
1254
    return ret;
1255
}
1256

    
1257
static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
1258
                                      target_ulong src2)
1259
{
1260
    uint32_t ret = 0;
1261

    
1262
    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
1263
        ret = PSR_CARRY;
1264
    }
1265
    return ret;
1266
}
1267

    
1268
static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
1269
                                     target_ulong src2)
1270
{
1271
    uint32_t ret = 0;
1272

    
1273
    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
1274
        ret = PSR_OVF;
1275
    }
1276
    return ret;
1277
}
1278

    
1279
static uint32_t compute_all_sub_xcc(void)
1280
{
1281
    uint32_t ret;
1282

    
1283
    ret = get_NZ_xcc(CC_DST);
1284
    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
1285
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1286
    return ret;
1287
}
1288

    
1289
static uint32_t compute_C_sub_xcc(void)
1290
{
1291
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
1292
}
1293
#endif
1294

    
1295
static uint32_t compute_all_sub(void)
1296
{
1297
    uint32_t ret;
1298

    
1299
    ret = get_NZ_icc(CC_DST);
1300
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1301
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1302
    return ret;
1303
}
1304

    
1305
static uint32_t compute_C_sub(void)
1306
{
1307
    return get_C_sub_icc(CC_SRC, CC_SRC2);
1308
}
1309

    
1310
#ifdef TARGET_SPARC64
1311
static uint32_t compute_all_subx_xcc(void)
1312
{
1313
    uint32_t ret;
1314

    
1315
    ret = get_NZ_xcc(CC_DST);
1316
    ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
1317
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1318
    return ret;
1319
}
1320

    
1321
static uint32_t compute_C_subx_xcc(void)
1322
{
1323
    uint32_t ret;
1324

    
1325
    ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
1326
    return ret;
1327
}
1328
#endif
1329

    
1330
static uint32_t compute_all_subx(void)
1331
{
1332
    uint32_t ret;
1333

    
1334
    ret = get_NZ_icc(CC_DST);
1335
    ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1336
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1337
    return ret;
1338
}
1339

    
1340
static uint32_t compute_C_subx(void)
1341
{
1342
    uint32_t ret;
1343

    
1344
    ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1345
    return ret;
1346
}
1347

    
1348
static uint32_t compute_all_tsub(void)
1349
{
1350
    uint32_t ret;
1351

    
1352
    ret = get_NZ_icc(CC_DST);
1353
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1354
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1355
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1356
    return ret;
1357
}
1358

    
1359
static uint32_t compute_all_tsubtv(void)
1360
{
1361
    uint32_t ret;
1362

    
1363
    ret = get_NZ_icc(CC_DST);
1364
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1365
    return ret;
1366
}
1367

    
1368
static uint32_t compute_all_logic(void)
1369
{
1370
    return get_NZ_icc(CC_DST);
1371
}
1372

    
1373
static uint32_t compute_C_logic(void)
1374
{
1375
    return 0;
1376
}
1377

    
1378
#ifdef TARGET_SPARC64
1379
static uint32_t compute_all_logic_xcc(void)
1380
{
1381
    return get_NZ_xcc(CC_DST);
1382
}
1383
#endif
1384

    
1385
typedef struct CCTable {
1386
    uint32_t (*compute_all)(void); /* return all the flags */
1387
    uint32_t (*compute_c)(void);  /* return the C flag */
1388
} CCTable;
1389

    
1390
static const CCTable icc_table[CC_OP_NB] = {
1391
    /* CC_OP_DYNAMIC should never happen */
1392
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
1393
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
1394
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
1395
    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
1396
    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
1397
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
1398
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
1399
    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
1400
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
1401
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
1402
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
1403
};
1404

    
1405
#ifdef TARGET_SPARC64
1406
static const CCTable xcc_table[CC_OP_NB] = {
1407
    /* CC_OP_DYNAMIC should never happen */
1408
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
1409
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
1410
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
1411
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
1412
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
1413
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
1414
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1415
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
1416
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1417
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
1418
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
1419
};
1420
#endif
1421

    
1422
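/* Fold the lazily evaluated flags into env->psr (and env->xcc on sparc64) and
   switch back to CC_OP_FLAGS. */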
void helper_compute_psr(void)
1423
{
1424
    uint32_t new_psr;
1425

    
1426
    new_psr = icc_table[CC_OP].compute_all();
1427
    env->psr = new_psr;
1428
#ifdef TARGET_SPARC64
1429
    new_psr = xcc_table[CC_OP].compute_all();
1430
    env->xcc = new_psr;
1431
#endif
1432
    CC_OP = CC_OP_FLAGS;
1433
}
1434

    
1435
uint32_t helper_compute_C_icc(void)
1436
{
1437
    uint32_t ret;
1438

    
1439
    ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
1440
    return ret;
1441
}
1442

    
1443
static inline void memcpy32(target_ulong *dst, const target_ulong *src)
1444
{
1445
    dst[0] = src[0];
1446
    dst[1] = src[1];
1447
    dst[2] = src[2];
1448
    dst[3] = src[3];
1449
    dst[4] = src[4];
1450
    dst[5] = src[5];
1451
    dst[6] = src[6];
1452
    dst[7] = src[7];
1453
}
1454

    
1455
static void set_cwp(int new_cwp)
1456
{
1457
    /* put the modified wrap registers at their proper location */
1458
    if (env->cwp == env->nwindows - 1) {
1459
        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
1460
    }
1461
    env->cwp = new_cwp;
1462

    
1463
    /* put the wrap registers at their temporary location */
1464
    if (new_cwp == env->nwindows - 1) {
1465
        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
1466
    }
1467
    env->regwptr = env->regbase + (new_cwp * 16);
1468
}
1469

    
1470
void cpu_set_cwp(CPUState *env1, int new_cwp)
1471
{
1472
    CPUState *saved_env;
1473

    
1474
    saved_env = env;
1475
    env = env1;
1476
    set_cwp(new_cwp);
1477
    env = saved_env;
1478
}
1479

    
1480
static target_ulong get_psr(void)
1481
{
1482
    helper_compute_psr();
1483

    
1484
#if !defined (TARGET_SPARC64)
1485
    return env->version | (env->psr & PSR_ICC) |
1486
        (env->psref? PSR_EF : 0) |
1487
        (env->psrpil << 8) |
1488
        (env->psrs? PSR_S : 0) |
1489
        (env->psrps? PSR_PS : 0) |
1490
        (env->psret? PSR_ET : 0) | env->cwp;
1491
#else
1492
    return env->psr & PSR_ICC;
1493
#endif
1494
}
1495

    
1496
target_ulong cpu_get_psr(CPUState *env1)
1497
{
1498
    CPUState *saved_env;
1499
    target_ulong ret;
1500

    
1501
    saved_env = env;
1502
    env = env1;
1503
    ret = get_psr();
1504
    env = saved_env;
1505
    return ret;
1506
}
1507

    
1508
static void put_psr(target_ulong val)
1509
{
1510
    env->psr = val & PSR_ICC;
1511
#if !defined (TARGET_SPARC64)
1512
    env->psref = (val & PSR_EF)? 1 : 0;
1513
    env->psrpil = (val & PSR_PIL) >> 8;
1514
#endif
1515
#if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
1516
    cpu_check_irqs(env);
1517
#endif
1518
#if !defined (TARGET_SPARC64)
1519
    env->psrs = (val & PSR_S)? 1 : 0;
1520
    env->psrps = (val & PSR_PS)? 1 : 0;
1521
    env->psret = (val & PSR_ET)? 1 : 0;
1522
    set_cwp(val & PSR_CWP);
1523
#endif
1524
    env->cc_op = CC_OP_FLAGS;
1525
}
1526

    
1527
void cpu_put_psr(CPUState *env1, target_ulong val)
1528
{
1529
    CPUState *saved_env;
1530

    
1531
    saved_env = env;
1532
    env = env1;
1533
    put_psr(val);
1534
    env = saved_env;
1535
}
1536

    
1537
static int cwp_inc(int cwp)
1538
{
1539
    if (unlikely(cwp >= env->nwindows)) {
1540
        cwp -= env->nwindows;
1541
    }
1542
    return cwp;
1543
}
1544

    
1545
int cpu_cwp_inc(CPUState *env1, int cwp)
1546
{
1547
    CPUState *saved_env;
1548
    target_ulong ret;
1549

    
1550
    saved_env = env;
1551
    env = env1;
1552
    ret = cwp_inc(cwp);
1553
    env = saved_env;
1554
    return ret;
1555
}
1556

    
1557
static int cwp_dec(int cwp)
1558
{
1559
    if (unlikely(cwp < 0)) {
1560
        cwp += env->nwindows;
1561
    }
1562
    return cwp;
1563
}
1564

    
1565
int cpu_cwp_dec(CPUState *env1, int cwp)
1566
{
1567
    CPUState *saved_env;
1568
    target_ulong ret;
1569

    
1570
    saved_env = env;
1571
    env = env1;
1572
    ret = cwp_dec(cwp);
1573
    env = saved_env;
1574
    return ret;
1575
}
1576

    
1577
#ifdef TARGET_SPARC64
1578
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
1579
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
1580
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
1581

    
1582
GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
1583
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
1584
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
1585

    
1586
GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
1587
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
1588
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
1589

    
1590
GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
1591
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
1592
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
1593

    
1594
GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
1595
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
1596
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
1597

    
1598
GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
1599
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
1600
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
1601
#endif
1602
#undef GEN_FCMPS
1603

    
1604
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
1605
    defined(DEBUG_MXCC)
1606
static void dump_mxcc(CPUState *env)
1607
{
1608
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1609
           "\n",
1610
           env->mxccdata[0], env->mxccdata[1],
1611
           env->mxccdata[2], env->mxccdata[3]);
1612
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1613
           "\n"
1614
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1615
           "\n",
1616
           env->mxccregs[0], env->mxccregs[1],
1617
           env->mxccregs[2], env->mxccregs[3],
1618
           env->mxccregs[4], env->mxccregs[5],
1619
           env->mxccregs[6], env->mxccregs[7]);
1620
}
1621
#endif
1622

    
1623
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
1624
    && defined(DEBUG_ASI)
1625
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
1626
                     uint64_t r1)
1627
{
1628
    switch (size)
1629
    {
1630
    case 1:
1631
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
1632
                    addr, asi, r1 & 0xff);
1633
        break;
1634
    case 2:
1635
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
1636
                    addr, asi, r1 & 0xffff);
1637
        break;
1638
    case 4:
1639
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
1640
                    addr, asi, r1 & 0xffffffff);
1641
        break;
1642
    case 8:
1643
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
1644
                    addr, asi, r1);
1645
        break;
1646
    }
1647
}
1648
#endif
1649

    
1650
#ifndef TARGET_SPARC64
1651
#ifndef CONFIG_USER_ONLY
1652

    
1653

    
1654
/* Leon3 cache control */
1655

    
1656
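/* Interrupt hook for the Leon3 cache control register: when the "freeze on
   interrupt" bits are set, enabled caches are moved to the frozen state. */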
static void leon3_cache_control_int(void)
1657
{
1658
    uint32_t state = 0;
1659

    
1660
    if (env->cache_control & CACHE_CTRL_IF) {
1661
        /* Instruction cache state */
1662
        state = env->cache_control & CACHE_STATE_MASK;
1663
        if (state == CACHE_ENABLED) {
1664
            state = CACHE_FROZEN;
1665
            DPRINTF_CACHE_CONTROL("Instruction cache: freeze\n");
1666
        }
1667

    
1668
        env->cache_control &= ~CACHE_STATE_MASK;
1669
        env->cache_control |= state;
1670
    }
1671

    
1672
    if (env->cache_control & CACHE_CTRL_DF) {
1673
        /* Data cache state */
1674
        state = (env->cache_control >> 2) & CACHE_STATE_MASK;
1675
        if (state == CACHE_ENABLED) {
1676
            state = CACHE_FROZEN;
1677
            DPRINTF_CACHE_CONTROL("Data cache: freeze\n");
1678
        }
1679

    
1680
        env->cache_control &= ~(CACHE_STATE_MASK << 2);
1681
        env->cache_control |= (state << 2);
1682
    }
1683
}
1684

    
1685
static void leon3_cache_control_st(target_ulong addr, uint64_t val, int size)
1686
{
1687
    DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
1688
                          addr, val, size);
1689

    
1690
    if (size != 4) {
1691
        DPRINTF_CACHE_CONTROL("32bits only\n");
1692
        return;
1693
    }
1694

    
1695
    switch (addr) {
1696
    case 0x00:              /* Cache control */
1697

    
1698
        /* These values must always be read as zeros */
1699
        val &= ~CACHE_CTRL_FD;
1700
        val &= ~CACHE_CTRL_FI;
1701
        val &= ~CACHE_CTRL_IB;
1702
        val &= ~CACHE_CTRL_IP;
1703
        val &= ~CACHE_CTRL_DP;
1704

    
1705
        env->cache_control = val;
1706
        break;
1707
    case 0x04:              /* Instruction cache configuration */
1708
    case 0x08:              /* Data cache configuration */
1709
        /* Read Only */
1710
        break;
1711
    default:
1712
        DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
1713
        break;
1714
    };
1715
}
1716

    
1717
static uint64_t leon3_cache_control_ld(target_ulong addr, int size)
1718
{
1719
    uint64_t ret = 0;
1720

    
1721
    if (size != 4) {
1722
        DPRINTF_CACHE_CONTROL("32bits only\n");
1723
        return 0;
1724
    }
1725

    
1726
    switch (addr) {
1727
    case 0x00:              /* Cache control */
1728
        ret = env->cache_control;
1729
        break;
1730

    
1731
        /* Configuration registers are read only and always keep those
1732
           predefined values */
1733

    
1734
    case 0x04:              /* Instruction cache configuration */
1735
        ret = 0x10220000;
1736
        break;
1737
    case 0x08:              /* Data cache configuration */
1738
        ret = 0x18220000;
1739
        break;
1740
    default:
1741
        DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
1742
        break;
1743
    };
1744
    DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
1745
                          addr, ret, size);
1746
    return ret;
1747
}
void leon3_irq_manager(void *irq_manager, int intno)
{
    leon3_irq_ack(irq_manager, intno);
    leon3_cache_control_int();
}

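/* lda/ldda-style alternate-space load for sparc32: dispatch on the ASI
   (MXCC/LEON3 control, SRMMU probe and registers, user/supervisor data,
   physical pass-through), then sign-extend the result if requested. */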
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1756
{
1757
    uint64_t ret = 0;
1758
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
1759
    uint32_t last_addr = addr;
1760
#endif
1761

    
1762
    helper_check_align(addr, size - 1);
1763
    switch (asi) {
1764
    case 2: /* SuperSparc MXCC registers and Leon3 cache control */
1765
        switch (addr) {
1766
        case 0x00:          /* Leon3 Cache Control */
1767
        case 0x08:          /* Leon3 Instruction Cache config */
1768
        case 0x0C:          /* Leon3 Data Cache config */
1769
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
1770
                ret = leon3_cache_control_ld(addr, size);
1771
            }
1772
            break;
1773
        case 0x01c00a00: /* MXCC control register */
1774
            if (size == 8)
1775
                ret = env->mxccregs[3];
1776
            else
1777
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1778
                             size);
1779
            break;
1780
        case 0x01c00a04: /* MXCC control register */
1781
            if (size == 4)
1782
                ret = env->mxccregs[3];
1783
            else
1784
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1785
                             size);
1786
            break;
1787
        case 0x01c00c00: /* Module reset register */
1788
            if (size == 8) {
1789
                ret = env->mxccregs[5];
1790
                // should we do something here?
1791
            } else
1792
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1793
                             size);
1794
            break;
1795
        case 0x01c00f00: /* MBus port address register */
1796
            if (size == 8)
1797
                ret = env->mxccregs[7];
1798
            else
1799
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1800
                             size);
1801
            break;
1802
        default:
1803
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1804
                         size);
1805
            break;
1806
        }
1807
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
1808
                     "addr = %08x -> ret = %" PRIx64 ","
1809
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
1810
#ifdef DEBUG_MXCC
1811
        dump_mxcc(env);
1812
#endif
1813
        break;
1814
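    /* ASI 3: SRMMU probe.  Address bits 11:8 select the probe type
       (0 = page, 1 = segment, 2 = region, 3 = context, 4 = entire);
       mmu_probe() returns the matching PTE, or 0 for a miss or an
       out-of-range type. */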
    case 3: /* MMU probe */
1815
        {
1816
            int mmulev;
1817

    
1818
            mmulev = (addr >> 8) & 15;
1819
            if (mmulev > 4)
1820
                ret = 0;
1821
            else
1822
                ret = mmu_probe(env, addr, mmulev);
1823
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
1824
                        addr, mmulev, ret);
1825
        }
1826
        break;
1827
    case 4: /* read MMU regs */
1828
        {
1829
            int reg = (addr >> 8) & 0x1f;
1830

    
1831
            ret = env->mmuregs[reg];
1832
            if (reg == 3) /* Fault status cleared on read */
1833
                env->mmuregs[3] = 0;
1834
            else if (reg == 0x13) /* Fault status read */
1835
                ret = env->mmuregs[3];
1836
            else if (reg == 0x14) /* Fault address read */
1837
                ret = env->mmuregs[4];
1838
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
1839
        }
1840
        break;
1841
    case 5: // Turbosparc ITLB Diagnostic
1842
    case 6: // Turbosparc DTLB Diagnostic
1843
    case 7: // Turbosparc IOTLB Diagnostic
1844
        break;
1845
    case 9: /* Supervisor code access */
1846
        switch(size) {
1847
        case 1:
1848
            ret = ldub_code(addr);
1849
            break;
1850
        case 2:
1851
            ret = lduw_code(addr);
1852
            break;
1853
        default:
1854
        case 4:
1855
            ret = ldl_code(addr);
1856
            break;
1857
        case 8:
1858
            ret = ldq_code(addr);
1859
            break;
1860
        }
1861
        break;
1862
    case 0xa: /* User data access */
1863
        switch(size) {
1864
        case 1:
1865
            ret = ldub_user(addr);
1866
            break;
1867
        case 2:
1868
            ret = lduw_user(addr);
1869
            break;
1870
        default:
1871
        case 4:
1872
            ret = ldl_user(addr);
1873
            break;
1874
        case 8:
1875
            ret = ldq_user(addr);
1876
            break;
1877
        }
1878
        break;
1879
    case 0xb: /* Supervisor data access */
1880
        switch(size) {
1881
        case 1:
1882
            ret = ldub_kernel(addr);
1883
            break;
1884
        case 2:
1885
            ret = lduw_kernel(addr);
1886
            break;
1887
        default:
1888
        case 4:
1889
            ret = ldl_kernel(addr);
1890
            break;
1891
        case 8:
1892
            ret = ldq_kernel(addr);
1893
            break;
1894
        }
1895
        break;
1896
    case 0xc: /* I-cache tag */
1897
    case 0xd: /* I-cache data */
1898
    case 0xe: /* D-cache tag */
1899
    case 0xf: /* D-cache data */
1900
        break;
1901
    case 0x20: /* MMU passthrough */
1902
        switch(size) {
1903
        case 1:
1904
            ret = ldub_phys(addr);
1905
            break;
1906
        case 2:
1907
            ret = lduw_phys(addr);
1908
            break;
1909
        default:
1910
        case 4:
1911
            ret = ldl_phys(addr);
1912
            break;
1913
        case 8:
1914
            ret = ldq_phys(addr);
1915
            break;
1916
        }
1917
        break;
1918
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1919
        switch(size) {
1920
        case 1:
1921
            ret = ldub_phys((target_phys_addr_t)addr
1922
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1923
            break;
1924
        case 2:
1925
            ret = lduw_phys((target_phys_addr_t)addr
1926
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1927
            break;
1928
        default:
1929
        case 4:
1930
            ret = ldl_phys((target_phys_addr_t)addr
1931
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1932
            break;
1933
        case 8:
1934
            ret = ldq_phys((target_phys_addr_t)addr
1935
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1936
            break;
1937
        }
1938
        break;
1939
    case 0x30: // Turbosparc secondary cache diagnostic
1940
    case 0x31: // Turbosparc RAM snoop
1941
    case 0x32: // Turbosparc page table descriptor diagnostic
1942
    case 0x39: /* data cache diagnostic register */
1943
        ret = 0;
1944
        break;
1945
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
1946
        {
1947
            int reg = (addr >> 8) & 3;
1948

    
1949
            switch(reg) {
1950
            case 0: /* Breakpoint Value (Addr) */
1951
                ret = env->mmubpregs[reg];
1952
                break;
1953
            case 1: /* Breakpoint Mask */
1954
                ret = env->mmubpregs[reg];
1955
                break;
1956
            case 2: /* Breakpoint Control */
1957
                ret = env->mmubpregs[reg];
1958
                break;
1959
            case 3: /* Breakpoint Status */
1960
                ret = env->mmubpregs[reg];
1961
                env->mmubpregs[reg] = 0ULL;
1962
                break;
1963
            }
1964
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
1965
                        ret);
1966
        }
1967
        break;
1968
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1969
        ret = env->mmubpctrv;
1970
        break;
1971
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1972
        ret = env->mmubpctrc;
1973
        break;
1974
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1975
        ret = env->mmubpctrs;
1976
        break;
1977
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1978
        ret = env->mmubpaction;
1979
        break;
1980
    case 8: /* User code access, XXX */
1981
    default:
1982
        do_unassigned_access(addr, 0, 0, asi, size);
1983
        ret = 0;
1984
        break;
1985
    }
1986
    if (sign) {
1987
        switch(size) {
1988
        case 1:
1989
            ret = (int8_t) ret;
1990
            break;
1991
        case 2:
1992
            ret = (int16_t) ret;
1993
            break;
1994
        case 4:
1995
            ret = (int32_t) ret;
1996
            break;
1997
        default:
1998
            break;
1999
        }
2000
    }
2001
#ifdef DEBUG_ASI
2002
    dump_asi("read ", last_addr, asi, size, ret);
2003
#endif
2004
    return ret;
2005
}
2006

    
2007
void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
2008
{
2009
    helper_check_align(addr, size - 1);
2010
    switch(asi) {
2011
    case 2: /* SuperSparc MXCC registers and Leon3 cache control */
2012
        switch (addr) {
2013
        case 0x00:          /* Leon3 Cache Control */
2014
        case 0x08:          /* Leon3 Instruction Cache config */
2015
        case 0x0C:          /* Leon3 Data Cache config */
2016
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
2017
                leon3_cache_control_st(addr, val, size);
2018
            }
2019
            break;
2020

    
2021
        case 0x01c00000: /* MXCC stream data register 0 */
2022
            if (size == 8)
2023
                env->mxccdata[0] = val;
2024
            else
2025
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2026
                             size);
2027
            break;
2028
        case 0x01c00008: /* MXCC stream data register 1 */
2029
            if (size == 8)
2030
                env->mxccdata[1] = val;
2031
            else
2032
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2033
                             size);
2034
            break;
2035
        case 0x01c00010: /* MXCC stream data register 2 */
2036
            if (size == 8)
2037
                env->mxccdata[2] = val;
2038
            else
2039
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2040
                             size);
2041
            break;
2042
        case 0x01c00018: /* MXCC stream data register 3 */
2043
            if (size == 8)
2044
                env->mxccdata[3] = val;
2045
            else
2046
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2047
                             size);
2048
            break;
2049
        case 0x01c00100: /* MXCC stream source */
2050
            if (size == 8)
2051
                env->mxccregs[0] = val;
2052
            else
2053
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2054
                             size);
2055
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2056
                                        0);
2057
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2058
                                        8);
2059
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2060
                                        16);
2061
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2062
                                        24);
2063
            break;
2064
        case 0x01c00200: /* MXCC stream destination */
2065
            if (size == 8)
2066
                env->mxccregs[1] = val;
2067
            else
2068
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2069
                             size);
2070
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  0,
2071
                     env->mxccdata[0]);
2072
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  8,
2073
                     env->mxccdata[1]);
2074
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
2075
                     env->mxccdata[2]);
2076
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
2077
                     env->mxccdata[3]);
2078
            break;
2079
        case 0x01c00a00: /* MXCC control register */
2080
            if (size == 8)
2081
                env->mxccregs[3] = val;
2082
            else
2083
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2084
                             size);
2085
            break;
2086
        case 0x01c00a04: /* MXCC control register */
2087
            if (size == 4)
2088
                env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
2089
                    | val;
2090
            else
2091
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2092
                             size);
2093
            break;
2094
        case 0x01c00e00: /* MXCC error register  */
2095
            // writing a 1 bit clears the error
2096
            if (size == 8)
2097
                env->mxccregs[6] &= ~val;
2098
            else
2099
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2100
                             size);
2101
            break;
2102
        case 0x01c00f00: /* MBus port address register */
2103
            if (size == 8)
2104
                env->mxccregs[7] = val;
2105
            else
2106
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2107
                             size);
2108
            break;
2109
        default:
2110
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
2111
                         size);
2112
            break;
2113
        }
2114
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
2115
                     asi, size, addr, val);
2116
#ifdef DEBUG_MXCC
2117
        dump_mxcc(env);
2118
#endif
2119
        break;
2120
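    /* ASI 3: SRMMU flush.  Address bits 11:8 give the flush scope; a page
       flush is applied precisely, while segment/region/context/entire
       flushes fall back to flushing the whole QEMU TLB. */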
    case 3: /* MMU flush */
2121
        {
2122
            int mmulev;
2123

    
2124
            mmulev = (addr >> 8) & 15;
2125
            DPRINTF_MMU("mmu flush level %d\n", mmulev);
2126
            switch (mmulev) {
2127
            case 0: // flush page
2128
                tlb_flush_page(env, addr & 0xfffff000);
2129
                break;
2130
            case 1: // flush segment (256k)
2131
            case 2: // flush region (16M)
2132
            case 3: // flush context (4G)
2133
            case 4: // flush entire
2134
                tlb_flush(env, 1);
2135
                break;
2136
            default:
2137
                break;
2138
            }
2139
#ifdef DEBUG_MMU
2140
            dump_mmu(stdout, fprintf, env);
2141
#endif
2142
        }
2143
        break;
2144
    case 4: /* write MMU regs */
2145
        {
2146
            int reg = (addr >> 8) & 0x1f;
2147
            uint32_t oldreg;
2148

    
2149
            oldreg = env->mmuregs[reg];
2150
            switch(reg) {
2151
            case 0: // Control Register
2152
                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
2153
                                    (val & 0x00ffffff);
2154
                // Mappings generated during no-fault mode or MMU
2155
                // disabled mode are invalid in normal mode
2156
                if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
2157
                    (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
2158
                    tlb_flush(env, 1);
2159
                break;
2160
            case 1: // Context Table Pointer Register
2161
                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
2162
                break;
2163
            case 2: // Context Register
2164
                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
2165
                if (oldreg != env->mmuregs[reg]) {
2166
                    /* we flush when the MMU context changes because
2167
                       QEMU has no MMU context support */
2168
                    tlb_flush(env, 1);
2169
                }
2170
                break;
2171
            case 3: // Synchronous Fault Status Register with Clear
2172
            case 4: // Synchronous Fault Address Register
2173
                break;
2174
            case 0x10: // TLB Replacement Control Register
2175
                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
2176
                break;
2177
            case 0x13: // Synchronous Fault Status Register with Read and Clear
2178
                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
2179
                break;
2180
            case 0x14: // Synchronous Fault Address Register
2181
                env->mmuregs[4] = val;
2182
                break;
2183
            default:
2184
                env->mmuregs[reg] = val;
2185
                break;
2186
            }
2187
            if (oldreg != env->mmuregs[reg]) {
2188
                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
2189
                            reg, oldreg, env->mmuregs[reg]);
2190
            }
2191
#ifdef DEBUG_MMU
2192
            dump_mmu(stdout, fprintf, env);
2193
#endif
2194
        }
2195
        break;
2196
    case 5: // Turbosparc ITLB Diagnostic
2197
    case 6: // Turbosparc DTLB Diagnostic
2198
    case 7: // Turbosparc IOTLB Diagnostic
2199
        break;
2200
    case 0xa: /* User data access */
2201
        switch(size) {
2202
        case 1:
2203
            stb_user(addr, val);
2204
            break;
2205
        case 2:
2206
            stw_user(addr, val);
2207
            break;
2208
        default:
2209
        case 4:
2210
            stl_user(addr, val);
2211
            break;
2212
        case 8:
2213
            stq_user(addr, val);
2214
            break;
2215
        }
2216
        break;
2217
    case 0xb: /* Supervisor data access */
2218
        switch(size) {
2219
        case 1:
2220
            stb_kernel(addr, val);
2221
            break;
2222
        case 2:
2223
            stw_kernel(addr, val);
2224
            break;
2225
        default:
2226
        case 4:
2227
            stl_kernel(addr, val);
2228
            break;
2229
        case 8:
2230
            stq_kernel(addr, val);
2231
            break;
2232
        }
2233
        break;
2234
    case 0xc: /* I-cache tag */
2235
    case 0xd: /* I-cache data */
2236
    case 0xe: /* D-cache tag */
2237
    case 0xf: /* D-cache data */
2238
    case 0x10: /* I/D-cache flush page */
2239
    case 0x11: /* I/D-cache flush segment */
2240
    case 0x12: /* I/D-cache flush region */
2241
    case 0x13: /* I/D-cache flush context */
2242
    case 0x14: /* I/D-cache flush user */
2243
        break;
2244
    case 0x17: /* Block copy, sta access */
2245
        {
2246
            // val = src
2247
            // addr = dst
2248
            // copy 32 bytes
2249
            unsigned int i;
2250
            uint32_t src = val & ~3, dst = addr & ~3, temp;
2251

    
2252
            for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
2253
                temp = ldl_kernel(src);
2254
                stl_kernel(dst, temp);
2255
            }
2256
        }
2257
        break;
2258
    case 0x1f: /* Block fill, stda access */
2259
        {
2260
            // addr = dst
2261
            // fill 32 bytes with val
2262
            unsigned int i;
2263
            uint32_t dst = addr & ~7;
2264

    
2265
            for (i = 0; i < 32; i += 8, dst += 8)
2266
                stq_kernel(dst, val);
2267
        }
2268
        break;
2269
    case 0x20: /* MMU passthrough */
2270
        {
2271
            switch(size) {
2272
            case 1:
2273
                stb_phys(addr, val);
2274
                break;
2275
            case 2:
2276
                stw_phys(addr, val);
2277
                break;
2278
            case 4:
2279
            default:
2280
                stl_phys(addr, val);
2281
                break;
2282
            case 8:
2283
                stq_phys(addr, val);
2284
                break;
2285
            }
2286
        }
2287
        break;
2288
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
2289
        {
2290
            switch(size) {
2291
            case 1:
2292
                stb_phys((target_phys_addr_t)addr
2293
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2294
                break;
2295
            case 2:
2296
                stw_phys((target_phys_addr_t)addr
2297
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2298
                break;
2299
            case 4:
2300
            default:
2301
                stl_phys((target_phys_addr_t)addr
2302
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2303
                break;
2304
            case 8:
2305
                stq_phys((target_phys_addr_t)addr
2306
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2307
                break;
2308
            }
2309
        }
2310
        break;
2311
    case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
2312
    case 0x31: // store buffer data, Ross RT620 I-cache flush or
2313
               // Turbosparc snoop RAM
2314
    case 0x32: // store buffer control or Turbosparc page table
2315
               // descriptor diagnostic
2316
    case 0x36: /* I-cache flash clear */
2317
    case 0x37: /* D-cache flash clear */
2318
        break;
2319
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
2320
        {
2321
            int reg = (addr >> 8) & 3;
2322

    
2323
            switch(reg) {
2324
            case 0: /* Breakpoint Value (Addr) */
2325
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
2326
                break;
2327
            case 1: /* Breakpoint Mask */
2328
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
2329
                break;
2330
            case 2: /* Breakpoint Control */
2331
                env->mmubpregs[reg] = (val & 0x7fULL);
2332
                break;
2333
            case 3: /* Breakpoint Status */
2334
                env->mmubpregs[reg] = (val & 0xfULL);
2335
                break;
2336
            }
2337
            DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
2338
                        env->mmuregs[reg]);
2339
        }
2340
        break;
2341
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
2342
        env->mmubpctrv = val & 0xffffffff;
2343
        break;
2344
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
2345
        env->mmubpctrc = val & 0x3;
2346
        break;
2347
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
2348
        env->mmubpctrs = val & 0x3;
2349
        break;
2350
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
2351
        env->mmubpaction = val & 0x1fff;
2352
        break;
2353
    case 8: /* User code access, XXX */
2354
    case 9: /* Supervisor code access, XXX */
2355
    default:
2356
        do_unassigned_access(addr, 1, 0, asi, size);
2357
        break;
2358
    }
2359
#ifdef DEBUG_ASI
2360
    dump_asi("write", addr, asi, size, val);
2361
#endif
2362
}
2363

    
2364
#endif /* CONFIG_USER_ONLY */
2365
#else /* TARGET_SPARC64 */
2366

    
2367
#ifdef CONFIG_USER_ONLY
2368
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
2369
{
2370
    uint64_t ret = 0;
2371
#if defined(DEBUG_ASI)
2372
    target_ulong last_addr = addr;
2373
#endif
2374

    
2375
    if (asi < 0x80)
2376
        raise_exception(TT_PRIV_ACT);
2377

    
2378
    helper_check_align(addr, size - 1);
2379
    addr = asi_address_mask(env, asi, addr);
2380

    
2381
    switch (asi) {
2382
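    /* No-fault ASIs must not trap on an unmapped address: if the page is
       not readable the load simply returns zero. */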
    case 0x82: // Primary no-fault
2383
    case 0x8a: // Primary no-fault LE
2384
        if (page_check_range(addr, size, PAGE_READ) == -1) {
2385
#ifdef DEBUG_ASI
2386
            dump_asi("read ", last_addr, asi, size, ret);
2387
#endif
2388
            return 0;
2389
        }
2390
        // Fall through
2391
    case 0x80: // Primary
2392
    case 0x88: // Primary LE
2393
        {
2394
            switch(size) {
2395
            case 1:
2396
                ret = ldub_raw(addr);
2397
                break;
2398
            case 2:
2399
                ret = lduw_raw(addr);
2400
                break;
2401
            case 4:
2402
                ret = ldl_raw(addr);
2403
                break;
2404
            default:
2405
            case 8:
2406
                ret = ldq_raw(addr);
2407
                break;
2408
            }
2409
        }
2410
        break;
2411
    case 0x83: // Secondary no-fault
2412
    case 0x8b: // Secondary no-fault LE
2413
        if (page_check_range(addr, size, PAGE_READ) == -1) {
2414
#ifdef DEBUG_ASI
2415
            dump_asi("read ", last_addr, asi, size, ret);
2416
#endif
2417
            return 0;
2418
        }
2419
        // Fall through
2420
    case 0x81: // Secondary
2421
    case 0x89: // Secondary LE
2422
        // XXX
2423
        break;
2424
    default:
2425
        break;
2426
    }
2427

    
2428
    /* Convert from little endian */
2429
    switch (asi) {
2430
    case 0x88: // Primary LE
2431
    case 0x89: // Secondary LE
2432
    case 0x8a: // Primary no-fault LE
2433
    case 0x8b: // Secondary no-fault LE
2434
        switch(size) {
2435
        case 2:
2436
            ret = bswap16(ret);
2437
            break;
2438
        case 4:
2439
            ret = bswap32(ret);
2440
            break;
2441
        case 8:
2442
            ret = bswap64(ret);
2443
            break;
2444
        default:
2445
            break;
2446
        }
2447
    default:
2448
        break;
2449
    }
2450

    
2451
    /* Convert to signed number */
2452
    if (sign) {
2453
        switch(size) {
2454
        case 1:
2455
            ret = (int8_t) ret;
2456
            break;
2457
        case 2:
2458
            ret = (int16_t) ret;
2459
            break;
2460
        case 4:
2461
            ret = (int32_t) ret;
2462
            break;
2463
        default:
2464
            break;
2465
        }
2466
    }
2467
#ifdef DEBUG_ASI
2468
    dump_asi("read ", last_addr, asi, size, ret);
2469
#endif
2470
    return ret;
2471
}
2472

    
2473
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
2474
{
2475
#ifdef DEBUG_ASI
2476
    dump_asi("write", addr, asi, size, val);
2477
#endif
2478
    if (asi < 0x80)
2479
        raise_exception(TT_PRIV_ACT);
2480

    
2481
    helper_check_align(addr, size - 1);
2482
    addr = asi_address_mask(env, asi, addr);
2483

    
2484
    /* Convert to little endian */
2485
    switch (asi) {
2486
    case 0x88: // Primary LE
2487
    case 0x89: // Secondary LE
2488
        switch(size) {
2489
        case 2:
2490
            val = bswap16(val);
2491
            break;
2492
        case 4:
2493
            val = bswap32(val);
2494
            break;
2495
        case 8:
2496
            val = bswap64(val);
2497
            break;
2498
        default:
2499
            break;
2500
        }
2501
    default:
2502
        break;
2503
    }
2504

    
2505
    switch(asi) {
2506
    case 0x80: // Primary
2507
    case 0x88: // Primary LE
2508
        {
2509
            switch(size) {
2510
            case 1:
2511
                stb_raw(addr, val);
2512
                break;
2513
            case 2:
2514
                stw_raw(addr, val);
2515
                break;
2516
            case 4:
2517
                stl_raw(addr, val);
2518
                break;
2519
            case 8:
2520
            default:
2521
                stq_raw(addr, val);
2522
                break;
2523
            }
2524
        }
2525
        break;
2526
    case 0x81: // Secondary
2527
    case 0x89: // Secondary LE
2528
        // XXX
2529
        return;
2530

    
2531
    case 0x82: // Primary no-fault, RO
2532
    case 0x83: // Secondary no-fault, RO
2533
    case 0x8a: // Primary no-fault LE, RO
2534
    case 0x8b: // Secondary no-fault LE, RO
2535
    default:
2536
        do_unassigned_access(addr, 1, 0, 1, size);
2537
        return;
2538
    }
2539
}
2540

    
2541
#else /* CONFIG_USER_ONLY */
2542

    
2543
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
2544
{
2545
    uint64_t ret = 0;
2546
#if defined(DEBUG_ASI)
2547
    target_ulong last_addr = addr;
2548
#endif
2549

    
2550
    asi &= 0xff;
2551

    
2552
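    /* ASIs 0x00-0x7f are restricted: they need PS_PRIV, and with a
       hypervisor present the 0x30-0x7f range also needs hyperprivileged
       mode. */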
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2553
        || (cpu_has_hypervisor(env)
2554
            && asi >= 0x30 && asi < 0x80
2555
            && !(env->hpstate & HS_PRIV)))
2556
        raise_exception(TT_PRIV_ACT);
2557

    
2558
    helper_check_align(addr, size - 1);
2559
    addr = asi_address_mask(env, asi, addr);
2560

    
2561
    switch (asi) {
2562
    case 0x82: // Primary no-fault
2563
    case 0x8a: // Primary no-fault LE
2564
    case 0x83: // Secondary no-fault
2565
    case 0x8b: // Secondary no-fault LE
2566
        {
2567
            /* secondary space access has lowest asi bit equal to 1 */
2568
            int access_mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX
                                           : MMU_KERNEL_IDX;
2570

    
2571
            if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
2572
#ifdef DEBUG_ASI
2573
                dump_asi("read ", last_addr, asi, size, ret);
2574
#endif
2575
                return 0;
2576
            }
2577
        }
2578
        // Fall through
2579
    case 0x10: // As if user primary
2580
    case 0x11: // As if user secondary
2581
    case 0x18: // As if user primary LE
2582
    case 0x19: // As if user secondary LE
2583
    case 0x80: // Primary
2584
    case 0x81: // Secondary
2585
    case 0x88: // Primary LE
2586
    case 0x89: // Secondary LE
2587
    case 0xe2: // UA2007 Primary block init
2588
    case 0xe3: // UA2007 Secondary block init
2589
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
2590
            if (cpu_hypervisor_mode(env)) {
2591
                switch(size) {
2592
                case 1:
2593
                    ret = ldub_hypv(addr);
2594
                    break;
2595
                case 2:
2596
                    ret = lduw_hypv(addr);
2597
                    break;
2598
                case 4:
2599
                    ret = ldl_hypv(addr);
2600
                    break;
2601
                default:
2602
                case 8:
2603
                    ret = ldq_hypv(addr);
2604
                    break;
2605
                }
2606
            } else {
2607
                /* secondary space access has lowest asi bit equal to 1 */
2608
                if (asi & 1) {
2609
                    switch(size) {
2610
                    case 1:
2611
                        ret = ldub_kernel_secondary(addr);
2612
                        break;
2613
                    case 2:
2614
                        ret = lduw_kernel_secondary(addr);
2615
                        break;
2616
                    case 4:
2617
                        ret = ldl_kernel_secondary(addr);
2618
                        break;
2619
                    default:
2620
                    case 8:
2621
                        ret = ldq_kernel_secondary(addr);
2622
                        break;
2623
                    }
2624
                } else {
2625
                    switch(size) {
2626
                    case 1:
2627
                        ret = ldub_kernel(addr);
2628
                        break;
2629
                    case 2:
2630
                        ret = lduw_kernel(addr);
2631
                        break;
2632
                    case 4:
2633
                        ret = ldl_kernel(addr);
2634
                        break;
2635
                    default:
2636
                    case 8:
2637
                        ret = ldq_kernel(addr);
2638
                        break;
2639
                    }
2640
                }
2641
            }
2642
        } else {
2643
            /* secondary space access has lowest asi bit equal to 1 */
2644
            if (asi & 1) {
2645
                switch(size) {
2646
                case 1:
2647
                    ret = ldub_user_secondary(addr);
2648
                    break;
2649
                case 2:
2650
                    ret = lduw_user_secondary(addr);
2651
                    break;
2652
                case 4:
2653
                    ret = ldl_user_secondary(addr);
2654
                    break;
2655
                default:
2656
                case 8:
2657
                    ret = ldq_user_secondary(addr);
2658
                    break;
2659
                }
2660
            } else {
2661
                switch(size) {
2662
                case 1:
2663
                    ret = ldub_user(addr);
2664
                    break;
2665
                case 2:
2666
                    ret = lduw_user(addr);
2667
                    break;
2668
                case 4:
2669
                    ret = ldl_user(addr);
2670
                    break;
2671
                default:
2672
                case 8:
2673
                    ret = ldq_user(addr);
2674
                    break;
2675
                }
2676
            }
2677
        }
2678
        break;
2679
    case 0x14: // Bypass
2680
    case 0x15: // Bypass, non-cacheable
2681
    case 0x1c: // Bypass LE
2682
    case 0x1d: // Bypass, non-cacheable LE
2683
        {
2684
            switch(size) {
2685
            case 1:
2686
                ret = ldub_phys(addr);
2687
                break;
2688
            case 2:
2689
                ret = lduw_phys(addr);
2690
                break;
2691
            case 4:
2692
                ret = ldl_phys(addr);
2693
                break;
2694
            default:
2695
            case 8:
2696
                ret = ldq_phys(addr);
2697
                break;
2698
            }
2699
            break;
2700
        }
2701
    case 0x24: // Nucleus quad LDD 128 bit atomic
2702
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2703
        //  Only ldda allowed
2704
        raise_exception(TT_ILL_INSN);
2705
        return 0;
2706
    case 0x04: // Nucleus
2707
    case 0x0c: // Nucleus Little Endian (LE)
2708
    {
2709
        switch(size) {
2710
        case 1:
2711
            ret = ldub_nucleus(addr);
2712
            break;
2713
        case 2:
2714
            ret = lduw_nucleus(addr);
2715
            break;
2716
        case 4:
2717
            ret = ldl_nucleus(addr);
2718
            break;
2719
        default:
2720
        case 8:
2721
            ret = ldq_nucleus(addr);
2722
            break;
2723
        }
2724
        break;
2725
    }
2726
    case 0x4a: // UPA config
2727
        // XXX
2728
        break;
2729
    case 0x45: // LSU
2730
        ret = env->lsu;
2731
        break;
2732
    case 0x50: // I-MMU regs
2733
        {
2734
            int reg = (addr >> 3) & 0xf;
2735

    
2736
            if (reg == 0) {
2737
                // I-TSB Tag Target register
2738
                ret = ultrasparc_tag_target(env->immu.tag_access);
2739
            } else {
2740
                ret = env->immuregs[reg];
2741
            }
2742

    
2743
            break;
2744
        }
2745
    case 0x51: // I-MMU 8k TSB pointer
2746
        {
2747
            // env->immuregs[5] holds I-MMU TSB register value
2748
            // env->immuregs[6] holds I-MMU Tag Access register value
2749
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
2750
                                         8*1024);
2751
            break;
2752
        }
2753
    case 0x52: // I-MMU 64k TSB pointer
2754
        {
2755
            // env->immuregs[5] holds I-MMU TSB register value
2756
            // env->immuregs[6] holds I-MMU Tag Access register value
2757
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
2758
                                         64*1024);
2759
            break;
2760
        }
2761
    case 0x55: // I-MMU data access
2762
        {
2763
            int reg = (addr >> 3) & 0x3f;
2764

    
2765
            ret = env->itlb[reg].tte;
2766
            break;
2767
        }
2768
    case 0x56: // I-MMU tag read
2769
        {
2770
            int reg = (addr >> 3) & 0x3f;
2771

    
2772
            ret = env->itlb[reg].tag;
2773
            break;
2774
        }
2775
    case 0x58: // D-MMU regs
2776
        {
2777
            int reg = (addr >> 3) & 0xf;
2778

    
2779
            if (reg == 0) {
2780
                // D-TSB Tag Target register
2781
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
2782
            } else {
2783
                ret = env->dmmuregs[reg];
2784
            }
2785
            break;
2786
        }
2787
    case 0x59: // D-MMU 8k TSB pointer
2788
        {
2789
            // env->dmmuregs[5] holds D-MMU TSB register value
2790
            // env->dmmuregs[6] holds D-MMU Tag Access register value
2791
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
2792
                                         8*1024);
2793
            break;
2794
        }
2795
    case 0x5a: // D-MMU 64k TSB pointer
2796
        {
2797
            // env->dmmuregs[5] holds D-MMU TSB register value
2798
            // env->dmmuregs[6] holds D-MMU Tag Access register value
2799
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
2800
                                         64*1024);
2801
            break;
2802
        }
2803
    case 0x5d: // D-MMU data access
2804
        {
2805
            int reg = (addr >> 3) & 0x3f;
2806

    
2807
            ret = env->dtlb[reg].tte;
2808
            break;
2809
        }
2810
    case 0x5e: // D-MMU tag read
2811
        {
2812
            int reg = (addr >> 3) & 0x3f;
2813

    
2814
            ret = env->dtlb[reg].tag;
2815
            break;
2816
        }
2817
    case 0x46: // D-cache data
2818
    case 0x47: // D-cache tag access
2819
    case 0x4b: // E-cache error enable
2820
    case 0x4c: // E-cache asynchronous fault status
2821
    case 0x4d: // E-cache asynchronous fault address
2822
    case 0x4e: // E-cache tag data
2823
    case 0x66: // I-cache instruction access
2824
    case 0x67: // I-cache tag access
2825
    case 0x6e: // I-cache predecode
2826
    case 0x6f: // I-cache LRU etc.
2827
    case 0x76: // E-cache tag
2828
    case 0x7e: // E-cache tag
2829
        break;
2830
    case 0x5b: // D-MMU data pointer
2831
    case 0x48: // Interrupt dispatch, RO
2832
    case 0x49: // Interrupt data receive
2833
    case 0x7f: // Incoming interrupt vector, RO
2834
        // XXX
2835
        break;
2836
    case 0x54: // I-MMU data in, WO
2837
    case 0x57: // I-MMU demap, WO
2838
    case 0x5c: // D-MMU data in, WO
2839
    case 0x5f: // D-MMU demap, WO
2840
    case 0x77: // Interrupt vector, WO
2841
    default:
2842
        do_unassigned_access(addr, 0, 0, 1, size);
2843
        ret = 0;
2844
        break;
2845
    }
2846

    
2847
    /* Convert from little endian */
2848
    switch (asi) {
2849
    case 0x0c: // Nucleus Little Endian (LE)
2850
    case 0x18: // As if user primary LE
2851
    case 0x19: // As if user secondary LE
2852
    case 0x1c: // Bypass LE
2853
    case 0x1d: // Bypass, non-cacheable LE
2854
    case 0x88: // Primary LE
2855
    case 0x89: // Secondary LE
2856
    case 0x8a: // Primary no-fault LE
2857
    case 0x8b: // Secondary no-fault LE
2858
        switch(size) {
2859
        case 2:
2860
            ret = bswap16(ret);
2861
            break;
2862
        case 4:
2863
            ret = bswap32(ret);
2864
            break;
2865
        case 8:
2866
            ret = bswap64(ret);
2867
            break;
2868
        default:
2869
            break;
2870
        }
2871
    default:
2872
        break;
2873
    }
2874

    
2875
    /* Convert to signed number */
2876
    if (sign) {
2877
        switch(size) {
2878
        case 1:
2879
            ret = (int8_t) ret;
2880
            break;
2881
        case 2:
2882
            ret = (int16_t) ret;
2883
            break;
2884
        case 4:
2885
            ret = (int32_t) ret;
2886
            break;
2887
        default:
2888
            break;
2889
        }
2890
    }
2891
#ifdef DEBUG_ASI
2892
    dump_asi("read ", last_addr, asi, size, ret);
2893
#endif
2894
    return ret;
2895
}
2896

    
2897
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
2898
{
2899
#ifdef DEBUG_ASI
2900
    dump_asi("write", addr, asi, size, val);
2901
#endif
2902

    
2903
    asi &= 0xff;
2904

    
2905
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2906
        || (cpu_has_hypervisor(env)
2907
            && asi >= 0x30 && asi < 0x80
2908
            && !(env->hpstate & HS_PRIV)))
2909
        raise_exception(TT_PRIV_ACT);
2910

    
2911
    helper_check_align(addr, size - 1);
2912
    addr = asi_address_mask(env, asi, addr);
2913

    
2914
    /* Convert to little endian */
2915
    switch (asi) {
2916
    case 0x0c: // Nucleus Little Endian (LE)
2917
    case 0x18: // As if user primary LE
2918
    case 0x19: // As if user secondary LE
2919
    case 0x1c: // Bypass LE
2920
    case 0x1d: // Bypass, non-cacheable LE
2921
    case 0x88: // Primary LE
2922
    case 0x89: // Secondary LE
2923
        switch(size) {
2924
        case 2:
2925
            val = bswap16(val);
2926
            break;
2927
        case 4:
2928
            val = bswap32(val);
2929
            break;
2930
        case 8:
2931
            val = bswap64(val);
2932
            break;
2933
        default:
2934
            break;
2935
        }
2936
    default:
2937
        break;
2938
    }
2939

    
2940
    switch(asi) {
2941
    case 0x10: // As if user primary
2942
    case 0x11: // As if user secondary
2943
    case 0x18: // As if user primary LE
2944
    case 0x19: // As if user secondary LE
2945
    case 0x80: // Primary
2946
    case 0x81: // Secondary
2947
    case 0x88: // Primary LE
2948
    case 0x89: // Secondary LE
2949
    case 0xe2: // UA2007 Primary block init
2950
    case 0xe3: // UA2007 Secondary block init
2951
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
2952
            if (cpu_hypervisor_mode(env)) {
2953
                switch(size) {
2954
                case 1:
2955
                    stb_hypv(addr, val);
2956
                    break;
2957
                case 2:
2958
                    stw_hypv(addr, val);
2959
                    break;
2960
                case 4:
2961
                    stl_hypv(addr, val);
2962
                    break;
2963
                case 8:
2964
                default:
2965
                    stq_hypv(addr, val);
2966
                    break;
2967
                }
2968
            } else {
2969
                /* secondary space access has lowest asi bit equal to 1 */
2970
                if (asi & 1) {
2971
                    switch(size) {
2972
                    case 1:
2973
                        stb_kernel_secondary(addr, val);
2974
                        break;
2975
                    case 2:
2976
                        stw_kernel_secondary(addr, val);
2977
                        break;
2978
                    case 4:
2979
                        stl_kernel_secondary(addr, val);
2980
                        break;
2981
                    case 8:
2982
                    default:
2983
                        stq_kernel_secondary(addr, val);
2984
                        break;
2985
                    }
2986
                } else {
2987
                    switch(size) {
2988
                    case 1:
2989
                        stb_kernel(addr, val);
2990
                        break;
2991
                    case 2:
2992
                        stw_kernel(addr, val);
2993
                        break;
2994
                    case 4:
2995
                        stl_kernel(addr, val);
2996
                        break;
2997
                    case 8:
2998
                    default:
2999
                        stq_kernel(addr, val);
3000
                        break;
3001
                    }
3002
                }
3003
            }
3004
        } else {
3005
            /* secondary space access has lowest asi bit equal to 1 */
3006
            if (asi & 1) {
3007
                switch(size) {
3008
                case 1:
3009
                    stb_user_secondary(addr, val);
3010
                    break;
3011
                case 2:
3012
                    stw_user_secondary(addr, val);
3013
                    break;
3014
                case 4:
3015
                    stl_user_secondary(addr, val);
3016
                    break;
3017
                case 8:
3018
                default:
3019
                    stq_user_secondary(addr, val);
3020
                    break;
3021
                }
3022
            } else {
3023
                switch(size) {
3024
                case 1:
3025
                    stb_user(addr, val);
3026
                    break;
3027
                case 2:
3028
                    stw_user(addr, val);
3029
                    break;
3030
                case 4:
3031
                    stl_user(addr, val);
3032
                    break;
3033
                case 8:
3034
                default:
3035
                    stq_user(addr, val);
3036
                    break;
3037
                }
3038
            }
3039
        }
3040
        break;
3041
    case 0x14: // Bypass
3042
    case 0x15: // Bypass, non-cacheable
3043
    case 0x1c: // Bypass LE
3044
    case 0x1d: // Bypass, non-cacheable LE
3045
        {
3046
            switch(size) {
3047
            case 1:
3048
                stb_phys(addr, val);
3049
                break;
3050
            case 2:
3051
                stw_phys(addr, val);
3052
                break;
3053
            case 4:
3054
                stl_phys(addr, val);
3055
                break;
3056
            case 8:
3057
            default:
3058
                stq_phys(addr, val);
3059
                break;
3060
            }
3061
        }
3062
        return;
3063
    case 0x24: // Nucleus quad LDD 128 bit atomic
3064
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
3065
        //  Only ldda allowed
3066
        raise_exception(TT_ILL_INSN);
3067
        return;
3068
    case 0x04: // Nucleus
3069
    case 0x0c: // Nucleus Little Endian (LE)
3070
    {
3071
        switch(size) {
3072
        case 1:
3073
            stb_nucleus(addr, val);
3074
            break;
3075
        case 2:
3076
            stw_nucleus(addr, val);
3077
            break;
3078
        case 4:
3079
            stl_nucleus(addr, val);
3080
            break;
3081
        default:
3082
        case 8:
3083
            stq_nucleus(addr, val);
3084
            break;
3085
        }
3086
        break;
3087
    }
3088

    
3089
    case 0x4a: // UPA config
3090
        // XXX
3091
        return;
3092
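    /* LSU control register: only the I-MMU and D-MMU enable bits are
       kept.  Toggling either bit invalidates translations created while
       the MMU was disabled, hence the full TLB flush below. */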
    case 0x45: // LSU
3093
        {
3094
            uint64_t oldreg;
3095

    
3096
            oldreg = env->lsu;
3097
            env->lsu = val & (DMMU_E | IMMU_E);
3098
            // Mappings generated during D/I MMU disabled mode are
3099
            // invalid in normal mode
3100
            if (oldreg != env->lsu) {
3101
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
3102
                            oldreg, env->lsu);
3103
#ifdef DEBUG_MMU
3104
                dump_mmu(stdout, fprintf, env);
3105
#endif
3106
                tlb_flush(env, 1);
3107
            }
3108
            return;
3109
        }
3110
    case 0x50: // I-MMU regs
3111
        {
3112
            int reg = (addr >> 3) & 0xf;
3113
            uint64_t oldreg;
3114

    
3115
            oldreg = env->immuregs[reg];
3116
            switch(reg) {
3117
            case 0: // RO
3118
                return;
3119
            case 1: // Not in I-MMU
3120
            case 2:
3121
                return;
3122
            case 3: // SFSR
3123
                if ((val & 1) == 0)
3124
                    val = 0; // Clear SFSR
3125
                env->immu.sfsr = val;
3126
                break;
3127
            case 4: // RO
3128
                return;
3129
            case 5: // TSB access
3130
                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
3131
                            PRIx64 "\n", env->immu.tsb, val);
3132
                env->immu.tsb = val;
3133
                break;
3134
            case 6: // Tag access
3135
                env->immu.tag_access = val;
3136
                break;
3137
            case 7:
3138
            case 8:
3139
                return;
3140
            default:
3141
                break;
3142
            }
3143

    
3144
            if (oldreg != env->immuregs[reg]) {
3145
                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
3146
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
3147
            }
3148
#ifdef DEBUG_MMU
3149
            dump_mmu(stdout, fprintf, env);
3150
#endif
3151
            return;
3152
        }
3153
    case 0x54: // I-MMU data in
3154
        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
3155
        return;
3156
    case 0x55: // I-MMU data access
3157
        {
3158
            // TODO: auto demap
3159

    
3160
            unsigned int i = (addr >> 3) & 0x3f;
3161

    
3162
            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
3163

    
3164
#ifdef DEBUG_MMU
3165
            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
3166
            dump_mmu(stdout, fprintf, env);
3167
#endif
3168
            return;
3169
        }
3170
    case 0x57: // I-MMU demap
3171
        demap_tlb(env->itlb, addr, "immu", env);
3172
        return;
3173
    case 0x58: // D-MMU regs
3174
        {
3175
            int reg = (addr >> 3) & 0xf;
3176
            uint64_t oldreg;
3177

    
3178
            oldreg = env->dmmuregs[reg];
3179
            switch(reg) {
3180
            case 0: // RO
3181
            case 4:
3182
                return;
3183
            case 3: // SFSR
3184
                if ((val & 1) == 0) {
3185
                    val = 0; // Clear SFSR, Fault address
3186
                    env->dmmu.sfar = 0;
3187
                }
3188
                env->dmmu.sfsr = val;
3189
                break;
3190
            case 1: // Primary context
3191
                env->dmmu.mmu_primary_context = val;
3192
                /* can be optimized to only flush MMU_USER_IDX
3193
                   and MMU_KERNEL_IDX entries */
3194
                tlb_flush(env, 1);
3195
                break;
3196
            case 2: // Secondary context
3197
                env->dmmu.mmu_secondary_context = val;
3198
                /* can be optimized to only flush MMU_USER_SECONDARY_IDX
3199
                   and MMU_KERNEL_SECONDARY_IDX entries */
3200
                tlb_flush(env, 1);
3201
                break;
3202
            case 5: // TSB access
3203
                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
3204
                            PRIx64 "\n", env->dmmu.tsb, val);
3205
                env->dmmu.tsb = val;
3206
                break;
3207
            case 6: // Tag access
3208
                env->dmmu.tag_access = val;
3209
                break;
3210
            case 7: // Virtual Watchpoint
3211
            case 8: // Physical Watchpoint
3212
            default:
3213
                env->dmmuregs[reg] = val;
3214
                break;
3215
            }
3216

    
3217
            if (oldreg != env->dmmuregs[reg]) {
3218
                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
3219
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
3220
            }
3221
#ifdef DEBUG_MMU
3222
            dump_mmu(stdout, fprintf, env);
3223
#endif
3224
            return;
3225
        }
3226
    case 0x5c: // D-MMU data in
3227
        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
3228
        return;
3229
    case 0x5d: // D-MMU data access
3230
        {
3231
            unsigned int i = (addr >> 3) & 0x3f;
3232

    
3233
            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
3234

    
3235
#ifdef DEBUG_MMU
3236
            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
3237
            dump_mmu(stdout, fprintf, env);
3238
#endif
3239
            return;
3240
        }
3241
    case 0x5f: // D-MMU demap
3242
        demap_tlb(env->dtlb, addr, "dmmu", env);
3243
        return;
3244
    case 0x49: // Interrupt data receive
3245
        // XXX
3246
        return;
3247
    case 0x46: // D-cache data
3248
    case 0x47: // D-cache tag access
3249
    case 0x4b: // E-cache error enable
3250
    case 0x4c: // E-cache asynchronous fault status
3251
    case 0x4d: // E-cache asynchronous fault address
3252
    case 0x4e: // E-cache tag data
3253
    case 0x66: // I-cache instruction access
3254
    case 0x67: // I-cache tag access
3255
    case 0x6e: // I-cache predecode
3256
    case 0x6f: // I-cache LRU etc.
3257
    case 0x76: // E-cache tag
3258
    case 0x7e: // E-cache tag
3259
        return;
3260
    case 0x51: // I-MMU 8k TSB pointer, RO
3261
    case 0x52: // I-MMU 64k TSB pointer, RO
3262
    case 0x56: // I-MMU tag read, RO
3263
    case 0x59: // D-MMU 8k TSB pointer, RO
3264
    case 0x5a: // D-MMU 64k TSB pointer, RO
3265
    case 0x5b: // D-MMU data pointer, RO
3266
    case 0x5e: // D-MMU tag read, RO
3267
    case 0x48: // Interrupt dispatch, RO
3268
    case 0x7f: // Incoming interrupt vector, RO
3269
    case 0x82: // Primary no-fault, RO
3270
    case 0x83: // Secondary no-fault, RO
3271
    case 0x8a: // Primary no-fault LE, RO
3272
    case 0x8b: // Secondary no-fault LE, RO
3273
    default:
3274
        do_unassigned_access(addr, 1, 0, 1, size);
3275
        return;
3276
    }
3277
}
3278
#endif /* CONFIG_USER_ONLY */

void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
#if !defined(CONFIG_USER_ONLY)
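    /* ASI 0x24/0x2c: 128-bit atomic quad load into an even/odd register
       pair; rd == 0 targets %g0/%g1, so only the odd word is written. */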
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        helper_check_align(addr, 0xf);
        if (rd == 0) {
            env->gregs[1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_nucleus(addr);
            env->gregs[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            env->regwptr[rd] = ldq_nucleus(addr);
            env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
#endif
    default:
        helper_check_align(addr, 0x3);
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}
3330

    
3331
void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
3332
{
3333
    unsigned int i;
3334
    target_ulong val;
3335

    
3336
    helper_check_align(addr, 3);
3337
    addr = asi_address_mask(env, asi, addr);
3338

    
3339
    switch (asi) {
3340
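    /* Block load ASIs move 64 bytes into 16 consecutive single-precision
       registers; rd must be a multiple of 8 and addr 64-byte aligned. */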
    case 0xf0: // Block load primary
3341
    case 0xf1: // Block load secondary
3342
    case 0xf8: // Block load primary LE
3343
    case 0xf9: // Block load secondary LE
3344
        if (rd & 7) {
3345
            raise_exception(TT_ILL_INSN);
3346
            return;
3347
        }
3348
        helper_check_align(addr, 0x3f);
3349
        for (i = 0; i < 16; i++) {
3350
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
3351
                                                         0);
3352
            addr += 4;
3353
        }
3354

    
3355
        return;
3356
    case 0x70: // Block load primary, user privilege
3357
    case 0x71: // Block load secondary, user privilege
3358
        if (rd & 7) {
3359
            raise_exception(TT_ILL_INSN);
3360
            return;
3361
        }
3362
        helper_check_align(addr, 0x3f);
3363
        for (i = 0; i < 16; i++) {
3364
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x1f, 4,
3365
                                                         0);
3366
            addr += 4;
3367
        }
3368

    
3369
        return;
3370
    default:
3371
        break;
3372
    }
3373

    
3374
    val = helper_ld_asi(addr, asi, size, 0);
3375
    switch(size) {
3376
    default:
3377
    case 4:
3378
        *((uint32_t *)&env->fpr[rd]) = val;
3379
        break;
3380
    case 8:
3381
        *((int64_t *)&DT0) = val;
3382
        break;
3383
    case 16:
3384
        // XXX
3385
        break;
3386
    }
3387
}
3388

    
3389
void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
3390
{
3391
    unsigned int i;
3392
    target_ulong val = 0;
3393

    
3394
    helper_check_align(addr, 3);
3395
    addr = asi_address_mask(env, asi, addr);
3396

    
3397
    switch (asi) {
3398
    case 0xe0: // UA2007 Block commit store primary (cache flush)
3399
    case 0xe1: // UA2007 Block commit store secondary (cache flush)
3400
    case 0xf0: // Block store primary
3401
    case 0xf1: // Block store secondary
3402
    case 0xf8: // Block store primary LE
3403
    case 0xf9: // Block store secondary LE
3404
        if (rd & 7) {
3405
            raise_exception(TT_ILL_INSN);
3406
            return;
3407
        }
3408
        helper_check_align(addr, 0x3f);
3409
        for (i = 0; i < 16; i++) {
3410
            val = *(uint32_t *)&env->fpr[rd++];
3411
            helper_st_asi(addr, val, asi & 0x8f, 4);
3412
            addr += 4;
3413
        }
3414

    
3415
        return;
3416
    case 0x70: // Block store primary, user privilege
3417
    case 0x71: // Block store secondary, user privilege
3418
        if (rd & 7) {
3419
            raise_exception(TT_ILL_INSN);
3420
            return;
3421
        }
3422
        helper_check_align(addr, 0x3f);
3423
        for (i = 0; i < 16; i++) {
3424
            val = *(uint32_t *)&env->fpr[rd++];
3425
            helper_st_asi(addr, val, asi & 0x1f, 4);
3426
            addr += 4;
3427
        }
3428

    
3429
        return;
3430
    default:
3431
        break;
3432
    }
3433

    
3434
    switch(size) {
3435
    default:
3436
    case 4:
3437
        val = *((uint32_t *)&env->fpr[rd]);
3438
        break;
3439
    case 8:
3440
        val = *((int64_t *)&DT0);
3441
        break;
3442
    case 16:
3443
        // XXX
3444
        break;
3445
    }
3446
    helper_st_asi(addr, val, asi, size);
3447
}
3448

    
3449
target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
3450
                            target_ulong val2, uint32_t asi)
3451
{
3452
    target_ulong ret;
3453

    
3454
    val2 &= 0xffffffffUL;
3455
    ret = helper_ld_asi(addr, asi, 4, 0);
3456
    ret &= 0xffffffffUL;
3457
    if (val2 == ret)
3458
        helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
3459
    return ret;
3460
}
3461

    
3462
target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
3463
                             target_ulong val2, uint32_t asi)
3464
{
3465
    target_ulong ret;
3466

    
3467
    ret = helper_ld_asi(addr, asi, 8, 0);
3468
    if (val2 == ret)
3469
        helper_st_asi(addr, val1, asi, 8);
3470
    return ret;
3471
}
3472
#endif /* TARGET_SPARC64 */
3473

    
3474
#ifndef TARGET_SPARC64
3475
void helper_rett(void)
3476
{
3477
    unsigned int cwp;
3478

    
3479
    if (env->psret == 1)
3480
        raise_exception(TT_ILL_INSN);
3481

    
3482
    env->psret = 1;
3483
    cwp = cwp_inc(env->cwp + 1) ;
3484
    if (env->wim & (1 << cwp)) {
3485
        raise_exception(TT_WIN_UNF);
3486
    }
3487
    set_cwp(cwp);
3488
    env->psrs = env->psrps;
3489
}
3490
#endif
3491

    
3492
static target_ulong helper_udiv_common(target_ulong a, target_ulong b, int cc)
3493
{
3494
    int overflow = 0;
3495
    uint64_t x0;
3496
    uint32_t x1;
3497

    
3498
    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
3499
    x1 = (b & 0xffffffff);
3500

    
3501
    if (x1 == 0) {
3502
        raise_exception(TT_DIV_ZERO);
3503
    }
3504

    
3505
    x0 = x0 / x1;
3506
    if (x0 > 0xffffffff) {
3507
        x0 = 0xffffffff;
3508
        overflow = 1;
3509
    }
3510

    
3511
    if (cc) {
3512
        env->cc_dst = x0;
3513
        env->cc_src2 = overflow;
3514
        env->cc_op = CC_OP_DIV;
3515
    }
3516
    return x0;
3517
}
3518

    
3519
target_ulong helper_udiv(target_ulong a, target_ulong b)
3520
{
3521
    return helper_udiv_common(a, b, 0);
3522
}
3523

    
3524
target_ulong helper_udiv_cc(target_ulong a, target_ulong b)
3525
{
3526
    return helper_udiv_common(a, b, 1);
3527
}
3528

    
3529
static target_ulong helper_sdiv_common(target_ulong a, target_ulong b, int cc)
3530
{
3531
    int overflow = 0;
3532
    int64_t x0;
3533
    int32_t x1;
3534

    
3535
    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
3536
    x1 = (b & 0xffffffff);
3537

    
3538
    if (x1 == 0) {
3539
        raise_exception(TT_DIV_ZERO);
3540
    }
3541

    
3542
    x0 = x0 / x1;
3543
    if ((int32_t) x0 != x0) {
3544
        x0 = x0 < 0 ? 0x80000000: 0x7fffffff;
3545
        overflow = 1;
3546
    }
3547

    
3548
    if (cc) {
3549
        env->cc_dst = x0;
3550
        env->cc_src2 = overflow;
3551
        env->cc_op = CC_OP_DIV;
3552
    }
3553
    return x0;
3554
}
3555

    
3556
target_ulong helper_sdiv(target_ulong a, target_ulong b)
3557
{
3558
    return helper_sdiv_common(a, b, 0);
3559
}
3560

    
3561
target_ulong helper_sdiv_cc(target_ulong a, target_ulong b)
3562
{
3563
    return helper_sdiv_common(a, b, 1);
3564
}
3565

    
3566
void helper_stdf(target_ulong addr, int mem_idx)
3567
{
3568
    helper_check_align(addr, 7);
3569
#if !defined(CONFIG_USER_ONLY)
3570
    switch (mem_idx) {
3571
    case MMU_USER_IDX:
3572
        stfq_user(addr, DT0);
3573
        break;
3574
    case MMU_KERNEL_IDX:
3575
        stfq_kernel(addr, DT0);
3576
        break;
3577
#ifdef TARGET_SPARC64
3578
    case MMU_HYPV_IDX:
3579
        stfq_hypv(addr, DT0);
3580
        break;
3581
#endif
3582
    default:
3583
        DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx);
3584
        break;
3585
    }
3586
#else
3587
    stfq_raw(address_mask(env, addr), DT0);
3588
#endif
3589
}
3590

    
3591
void helper_lddf(target_ulong addr, int mem_idx)
3592
{
3593
    helper_check_align(addr, 7);
3594
#if !defined(CONFIG_USER_ONLY)
3595
    switch (mem_idx) {
3596
    case MMU_USER_IDX:
3597
        DT0 = ldfq_user(addr);
3598
        break;
3599
    case MMU_KERNEL_IDX:
3600
        DT0 = ldfq_kernel(addr);
3601
        break;
3602
#ifdef TARGET_SPARC64
3603
    case MMU_HYPV_IDX:
3604
        DT0 = ldfq_hypv(addr);
3605
        break;
3606
#endif
3607
    default:
3608
        DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx);
3609
        break;
3610
    }
3611
#else
3612
    DT0 = ldfq_raw(address_mask(env, addr));
3613
#endif
3614
}
3615

    
3616
void helper_ldqf(target_ulong addr, int mem_idx)
3617
{
3618
    // XXX add 128 bit load
3619
    CPU_QuadU u;
3620

    
3621
    helper_check_align(addr, 7);
3622
#if !defined(CONFIG_USER_ONLY)
3623
    switch (mem_idx) {
3624
    case MMU_USER_IDX:
3625
        u.ll.upper = ldq_user(addr);
3626
        u.ll.lower = ldq_user(addr + 8);
3627
        QT0 = u.q;
3628
        break;
3629
    case MMU_KERNEL_IDX:
3630
        u.ll.upper = ldq_kernel(addr);
3631
        u.ll.lower = ldq_kernel(addr + 8);
3632
        QT0 = u.q;
3633
        break;
3634
#ifdef TARGET_SPARC64
3635
    case MMU_HYPV_IDX:
3636
        u.ll.upper = ldq_hypv(addr);
3637
        u.ll.lower = ldq_hypv(addr + 8);
3638
        QT0 = u.q;
3639
        break;
3640
#endif
3641
    default:
3642
        DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
3643
        break;
3644
    }
3645
#else
3646
    u.ll.upper = ldq_raw(address_mask(env, addr));
3647
    u.ll.lower = ldq_raw(address_mask(env, addr + 8));
3648
    QT0 = u.q;
3649
#endif
3650
}
3651

    
3652
void helper_stqf(target_ulong addr, int mem_idx)
3653
{
3654
    // XXX add 128 bit store
3655
    CPU_QuadU u;
3656

    
3657
    helper_check_align(addr, 7);
3658
#if !defined(CONFIG_USER_ONLY)
3659
    switch (mem_idx) {
3660
    case MMU_USER_IDX:
3661
        u.q = QT0;
3662
        stq_user(addr, u.ll.upper);
3663
        stq_user(addr + 8, u.ll.lower);
3664
        break;
3665
    case MMU_KERNEL_IDX:
3666
        u.q = QT0;
3667
        stq_kernel(addr, u.ll.upper);
3668
        stq_kernel(addr + 8, u.ll.lower);
3669
        break;
3670
#ifdef TARGET_SPARC64
3671
    case MMU_HYPV_IDX:
3672
        u.q = QT0;
3673
        stq_hypv(addr, u.ll.upper);
3674
        stq_hypv(addr + 8, u.ll.lower);
3675
        break;
3676
#endif
3677
    default:
3678
        DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
3679
        break;
3680
    }
3681
#else
3682
    u.q = QT0;
3683
    stq_raw(address_mask(env, addr), u.ll.upper);
3684
    stq_raw(address_mask(env, addr + 8), u.ll.lower);
3685
#endif
3686
}
3687

    
3688
static inline void set_fsr(void)
3689
{
3690
    int rnd_mode;
3691

    
3692
    switch (env->fsr & FSR_RD_MASK) {
3693
    case FSR_RD_NEAREST:
3694
        rnd_mode = float_round_nearest_even;
3695
        break;
3696
    default:
3697
    case FSR_RD_ZERO:
3698
        rnd_mode = float_round_to_zero;
3699
        break;
3700
    case FSR_RD_POS:
3701
        rnd_mode = float_round_up;
3702
        break;
3703
    case FSR_RD_NEG:
3704
        rnd_mode = float_round_down;
3705
        break;
3706
    }
3707
    set_float_rounding_mode(rnd_mode, &env->fp_status);
3708
}
3709

    
3710
void helper_ldfsr(uint32_t new_fsr)
3711
{
3712
    env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
3713
    set_fsr();
3714
}
3715

    
3716
#ifdef TARGET_SPARC64
3717
void helper_ldxfsr(uint64_t new_fsr)
3718
{
3719
    env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
3720
    set_fsr();
3721
}
3722
#endif
3723

    
3724
void helper_debug(void)
3725
{
3726
    env->exception_index = EXCP_DEBUG;
3727
    cpu_loop_exit();
3728
}
3729

    
3730
#ifndef TARGET_SPARC64
3731
/* XXX: use another pointer for %iN registers to avoid slow wrapping
3732
   handling ? */
3733
void helper_save(void)
3734
{
3735
    uint32_t cwp;
3736

    
3737
    cwp = cwp_dec(env->cwp - 1);
3738
    if (env->wim & (1 << cwp)) {
3739
        raise_exception(TT_WIN_OVF);
3740
    }
3741
    set_cwp(cwp);
3742
}
3743

    
3744
void helper_restore(void)
3745
{
3746
    uint32_t cwp;
3747

    
3748
    cwp = cwp_inc(env->cwp + 1);
3749
    if (env->wim & (1 << cwp)) {
3750
        raise_exception(TT_WIN_UNF);
3751
    }
3752
    set_cwp(cwp);
3753
}
3754

    
3755
void helper_wrpsr(target_ulong new_psr)
3756
{
3757
    if ((new_psr & PSR_CWP) >= env->nwindows) {
3758
        raise_exception(TT_ILL_INSN);
3759
    } else {
3760
        cpu_put_psr(env, new_psr);
3761
    }
3762
}
3763

    
3764
target_ulong helper_rdpsr(void)
3765
{
3766
    return get_psr();
3767
}
3768

    
3769
#else
3770
/* XXX: use another pointer for %iN registers to avoid slow wrapping
3771
   handling ? */
3772
void helper_save(void)
3773
{
3774
    uint32_t cwp;
3775

    
3776
    cwp = cwp_dec(env->cwp - 1);
3777
    if (env->cansave == 0) {
3778
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
3779
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
3780
                                    ((env->wstate & 0x7) << 2)));
3781
    } else {
3782
        if (env->cleanwin - env->canrestore == 0) {
3783
            // XXX Clean windows without trap
3784
            raise_exception(TT_CLRWIN);
3785
        } else {
3786
            env->cansave--;
3787
            env->canrestore++;
3788
            set_cwp(cwp);
3789
        }
3790
    }
3791
}
3792

    
3793
void helper_restore(void)
3794
{
3795
    uint32_t cwp;
3796

    
3797
    cwp = cwp_inc(env->cwp + 1);
3798
    if (env->canrestore == 0) {
3799
        raise_exception(TT_FILL | (env->otherwin != 0 ?
3800
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
3801
                                   ((env->wstate & 0x7) << 2)));
3802
    } else {
3803
        env->cansave++;
3804
        env->canrestore--;
3805
        set_cwp(cwp);
3806
    }
3807
}
3808

    
3809
void helper_flushw(void)
3810
{
3811
    if (env->cansave != env->nwindows - 2) {
3812
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
3813
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
3814
                                    ((env->wstate & 0x7) << 2)));
3815
    }
3816
}
3817

    
3818
void helper_saved(void)
3819
{
3820
    env->cansave++;
3821
    if (env->otherwin == 0)
3822
        env->canrestore--;
3823
    else
3824
        env->otherwin--;
3825
}
3826

    
3827
void helper_restored(void)
3828
{
3829
    env->canrestore++;
3830
    if (env->cleanwin < env->nwindows - 1)
3831
        env->cleanwin++;
3832
    if (env->otherwin == 0)
3833
        env->cansave--;
3834
    else
3835
        env->otherwin--;
3836
}
3837

    
3838
static target_ulong get_ccr(void)
3839
{
3840
    target_ulong psr;
3841

    
3842
    psr = get_psr();
3843

    
3844
    return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
3845
}
3846

    
3847
target_ulong cpu_get_ccr(CPUState *env1)
3848
{
3849
    CPUState *saved_env;
3850
    target_ulong ret;
3851

    
3852
    saved_env = env;
3853
    env = env1;
3854
    ret = get_ccr();
3855
    env = saved_env;
3856
    return ret;
3857
}
3858

    
3859
static void put_ccr(target_ulong val)
3860
{
3861
    target_ulong tmp = val;
3862

    
3863
    env->xcc = (tmp >> 4) << 20;
3864
    env->psr = (tmp & 0xf) << 20;
3865
    CC_OP = CC_OP_FLAGS;
3866
}
3867

    
3868
void cpu_put_ccr(CPUState *env1, target_ulong val)
3869
{
3870
    CPUState *saved_env;
3871

    
3872
    saved_env = env;
3873
    env = env1;
3874
    put_ccr(val);
3875
    env = saved_env;
3876
}
3877

    
3878
static target_ulong get_cwp64(void)
3879
{
3880
    return env->nwindows - 1 - env->cwp;
3881
}
3882

    
3883
target_ulong cpu_get_cwp64(CPUState *env1)
3884
{
3885
    CPUState *saved_env;
3886
    target_ulong ret;
3887

    
3888
    saved_env = env;
3889
    env = env1;
3890
    ret = get_cwp64();
3891
    env = saved_env;
3892
    return ret;
3893
}
3894

    
3895
static void put_cwp64(int cwp)
3896
{
3897
    if (unlikely(cwp >= env->nwindows || cwp < 0)) {
3898
        cwp %= env->nwindows;
3899
    }
3900
    set_cwp(env->nwindows - 1 - cwp);
3901
}
3902

    
3903
void cpu_put_cwp64(CPUState *env1, int cwp)
3904
{
3905
    CPUState *saved_env;
3906

    
3907
    saved_env = env;
3908
    env = env1;
3909
    put_cwp64(cwp);
3910
    env = saved_env;
3911
}
3912

    
3913
target_ulong helper_rdccr(void)
3914
{
3915
    return get_ccr();
3916
}
3917

    
3918
void helper_wrccr(target_ulong new_ccr)
3919
{
3920
    put_ccr(new_ccr);
3921
}
3922

    
3923
// CWP handling is reversed in V9, but we still use the V8 register
3924
// order.
3925
target_ulong helper_rdcwp(void)
3926
{
3927
    return get_cwp64();
3928
}
3929

    
3930
void helper_wrcwp(target_ulong new_cwp)
3931
{
3932
    put_cwp64(new_cwp);
3933
}
3934

    
3935
// This function uses non-native bit order
3936
#define GET_FIELD(X, FROM, TO)                                  \
3937
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
3938

    
3939
// This function uses the order in the manuals, i.e. bit 0 is 2^0
3940
#define GET_FIELD_SP(X, FROM, TO)               \
3941
    GET_FIELD(X, 63 - (TO), 63 - (FROM))
3942

    
3943
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
3944
{
3945
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
3946
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
3947
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
3948
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
3949
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
3950
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
3951
        (((pixel_addr >> 55) & 1) << 4) |
3952
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
3953
        GET_FIELD_SP(pixel_addr, 11, 12);
3954
}
3955

    
3956
target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
3957
{
3958
    uint64_t tmp;
3959

    
3960
    tmp = addr + offset;
3961
    env->gsr &= ~7ULL;
3962
    env->gsr |= tmp & 7ULL;
3963
    return tmp & ~7ULL;
3964
}
3965

    
3966
target_ulong helper_popc(target_ulong val)
3967
{
3968
    return ctpop64(val);
3969
}
3970

    
3971
static inline uint64_t *get_gregset(uint32_t pstate)
3972
{
3973
    switch (pstate) {
3974
    default:
3975
        DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
3976
                pstate,
3977
                (pstate & PS_IG) ? " IG" : "",
3978
                (pstate & PS_MG) ? " MG" : "",
3979
                (pstate & PS_AG) ? " AG" : "");
3980
        /* pass through to normal set of global registers */
3981
    case 0:
3982
        return env->bgregs;
3983
    case PS_AG:
3984
        return env->agregs;
3985
    case PS_MG:
3986
        return env->mgregs;
3987
    case PS_IG:
3988
        return env->igregs;
3989
    }
3990
}
3991

    
3992
static inline void change_pstate(uint32_t new_pstate)
3993
{
3994
    uint32_t pstate_regs, new_pstate_regs;
3995
    uint64_t *src, *dst;
3996

    
3997
    if (env->def->features & CPU_FEATURE_GL) {
3998
        // PS_AG is not implemented in this case
3999
        new_pstate &= ~PS_AG;
4000
    }
4001

    
4002
    pstate_regs = env->pstate & 0xc01;
4003
    new_pstate_regs = new_pstate & 0xc01;
4004

    
4005
    if (new_pstate_regs != pstate_regs) {
4006
        DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
4007
                       pstate_regs, new_pstate_regs);
4008
        // Switch global register bank
4009
        src = get_gregset(new_pstate_regs);
4010
        dst = get_gregset(pstate_regs);
4011
        memcpy32(dst, env->gregs);
4012
        memcpy32(env->gregs, src);
4013
    }
4014
    else {
4015
        DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
4016
                       new_pstate_regs);
4017
    }
4018
    env->pstate = new_pstate;
4019
}
4020

    
4021
void helper_wrpstate(target_ulong new_state)
4022
{
4023
    change_pstate(new_state & 0xf3f);
4024

    
4025
#if !defined(CONFIG_USER_ONLY)
4026
    if (cpu_interrupts_enabled(env)) {
4027
        cpu_check_irqs(env);
4028
    }
4029
#endif
4030
}
4031

    
4032
void helper_wrpil(target_ulong new_pil)
4033
{
4034
#if !defined(CONFIG_USER_ONLY)
4035
    DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
4036
                   env->psrpil, (uint32_t)new_pil);
4037

    
4038
    env->psrpil = new_pil;
4039

    
4040
    if (cpu_interrupts_enabled(env)) {
4041
        cpu_check_irqs(env);
4042
    }
4043
#endif
4044
}
4045

    
4046
void helper_done(void)
4047
{
4048
    trap_state* tsptr = cpu_tsptr(env);
4049

    
4050
    env->pc = tsptr->tnpc;
4051
    env->npc = tsptr->tnpc + 4;
4052
    put_ccr(tsptr->tstate >> 32);
4053
    env->asi = (tsptr->tstate >> 24) & 0xff;
4054
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
4055
    put_cwp64(tsptr->tstate & 0xff);
4056
    env->tl--;
4057

    
4058
    DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);
4059

    
4060
#if !defined(CONFIG_USER_ONLY)
4061
    if (cpu_interrupts_enabled(env)) {
4062
        cpu_check_irqs(env);
4063
    }
4064
#endif
4065
}
4066

    
4067
void helper_retry(void)
4068
{
4069
    trap_state* tsptr = cpu_tsptr(env);
4070

    
4071
    env->pc = tsptr->tpc;
4072
    env->npc = tsptr->tnpc;
4073
    put_ccr(tsptr->tstate >> 32);
4074
    env->asi = (tsptr->tstate >> 24) & 0xff;
4075
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
4076
    put_cwp64(tsptr->tstate & 0xff);
4077
    env->tl--;
4078

    
4079
    DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);
4080

    
4081
#if !defined(CONFIG_USER_ONLY)
4082
    if (cpu_interrupts_enabled(env)) {
4083
        cpu_check_irqs(env);
4084
    }
4085
#endif
4086
}
4087

    
4088
static void do_modify_softint(const char* operation, uint32_t value)
4089
{
4090
    if (env->softint != value) {
4091
        env->softint = value;
4092
        DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
4093
#if !defined(CONFIG_USER_ONLY)
4094
        if (cpu_interrupts_enabled(env)) {
4095
            cpu_check_irqs(env);
4096
        }
4097
#endif
4098
    }
4099
}
4100

    
4101
void helper_set_softint(uint64_t value)
4102
{
4103
    do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
4104
}
4105

    
4106
void helper_clear_softint(uint64_t value)
4107
{
4108
    do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
4109
}
4110

    
4111
void helper_write_softint(uint64_t value)
4112
{
4113
    do_modify_softint("helper_write_softint", (uint32_t)value);
4114
}
4115
#endif
4116

    
4117
#ifdef TARGET_SPARC64
4118
#ifdef DEBUG_PCALL
4119
static const char * const excp_names[0x80] = {
4120
    [TT_TFAULT] = "Instruction Access Fault",
4121
    [TT_TMISS] = "Instruction Access MMU Miss",
4122
    [TT_CODE_ACCESS] = "Instruction Access Error",
4123
    [TT_ILL_INSN] = "Illegal Instruction",
4124
    [TT_PRIV_INSN] = "Privileged Instruction",
4125
    [TT_NFPU_INSN] = "FPU Disabled",
4126
    [TT_FP_EXCP] = "FPU Exception",
4127
    [TT_TOVF] = "Tag Overflow",
4128
    [TT_CLRWIN] = "Clean Windows",
4129
    [TT_DIV_ZERO] = "Division By Zero",
4130
    [TT_DFAULT] = "Data Access Fault",
4131
    [TT_DMISS] = "Data Access MMU Miss",
4132
    [TT_DATA_ACCESS] = "Data Access Error",
4133
    [TT_DPROT] = "Data Protection Error",
4134
    [TT_UNALIGNED] = "Unaligned Memory Access",
4135
    [TT_PRIV_ACT] = "Privileged Action",
4136
    [TT_EXTINT | 0x1] = "External Interrupt 1",
4137
    [TT_EXTINT | 0x2] = "External Interrupt 2",
4138
    [TT_EXTINT | 0x3] = "External Interrupt 3",
4139
    [TT_EXTINT | 0x4] = "External Interrupt 4",
4140
    [TT_EXTINT | 0x5] = "External Interrupt 5",
4141
    [TT_EXTINT | 0x6] = "External Interrupt 6",
4142
    [TT_EXTINT | 0x7] = "External Interrupt 7",
4143
    [TT_EXTINT | 0x8] = "External Interrupt 8",
4144
    [TT_EXTINT | 0x9] = "External Interrupt 9",
4145
    [TT_EXTINT | 0xa] = "External Interrupt 10",
4146
    [TT_EXTINT | 0xb] = "External Interrupt 11",
4147
    [TT_EXTINT | 0xc] = "External Interrupt 12",
4148
    [TT_EXTINT | 0xd] = "External Interrupt 13",
4149
    [TT_EXTINT | 0xe] = "External Interrupt 14",
4150
    [TT_EXTINT | 0xf] = "External Interrupt 15",
4151
};
4152
#endif
4153

    
4154
trap_state* cpu_tsptr(CPUState* env)
4155
{
4156
    return &env->ts[env->tl & MAXTL_MASK];
4157
}
4158

    
4159
void do_interrupt(CPUState *env)
4160
{
4161
    int intno = env->exception_index;
4162
    trap_state* tsptr;
4163

    
4164
#ifdef DEBUG_PCALL
4165
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
4166
        static int count;
4167
        const char *name;
4168

    
4169
        if (intno < 0 || intno >= 0x180)
4170
            name = "Unknown";
4171
        else if (intno >= 0x100)
4172
            name = "Trap Instruction";
4173
        else if (intno >= 0xc0)
4174
            name = "Window Fill";
4175
        else if (intno >= 0x80)
4176
            name = "Window Spill";
4177
        else {
4178
            name = excp_names[intno];
4179
            if (!name)
4180
                name = "Unknown";
4181
        }
4182

    
4183
        qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
4184
                " SP=%016" PRIx64 "\n",
4185
                count, name, intno,
4186
                env->pc,
4187
                env->npc, env->regwptr[6]);
4188
        log_cpu_state(env, 0);
4189
#if 0
4190
        {
4191
            int i;
4192
            uint8_t *ptr;
4193

4194
            qemu_log("       code=");
4195
            ptr = (uint8_t *)env->pc;
4196
            for(i = 0; i < 16; i++) {
4197
                qemu_log(" %02x", ldub(ptr + i));
4198
            }
4199
            qemu_log("\n");
4200
        }
4201
#endif
4202
        count++;
4203
    }
4204
#endif
4205
#if !defined(CONFIG_USER_ONLY)
4206
    if (env->tl >= env->maxtl) {
4207
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
4208
                  " Error state", env->exception_index, env->tl, env->maxtl);
4209
        return;
4210
    }
4211
#endif
4212
    if (env->tl < env->maxtl - 1) {
4213
        env->tl++;
4214
    } else {
4215
        env->pstate |= PS_RED;
4216
        if (env->tl < env->maxtl)
4217
            env->tl++;
4218
    }
4219
    tsptr = cpu_tsptr(env);
4220

    
4221
    tsptr->tstate = (get_ccr() << 32) |
4222
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
4223
        get_cwp64();
4224
    tsptr->tpc = env->pc;
4225
    tsptr->tnpc = env->npc;
4226
    tsptr->tt = intno;
4227

    
4228
    switch (intno) {
4229
    case TT_IVEC:
4230
        change_pstate(PS_PEF | PS_PRIV | PS_IG);
4231
        break;
4232
    case TT_TFAULT:
4233
    case TT_DFAULT:
4234
    case TT_TMISS ... TT_TMISS + 3:
4235
    case TT_DMISS ... TT_DMISS + 3:
4236
    case TT_DPROT ... TT_DPROT + 3:
4237
        change_pstate(PS_PEF | PS_PRIV | PS_MG);
4238
        break;
4239
    default:
4240
        change_pstate(PS_PEF | PS_PRIV | PS_AG);
4241
        break;
4242
    }
4243

    
4244
    if (intno == TT_CLRWIN) {
4245
        set_cwp(cwp_dec(env->cwp - 1));
4246
    } else if ((intno & 0x1c0) == TT_SPILL) {
4247
        set_cwp(cwp_dec(env->cwp - env->cansave - 2));
4248
    } else if ((intno & 0x1c0) == TT_FILL) {
4249
        set_cwp(cwp_inc(env->cwp + 1));
4250
    }
4251
    env->tbr &= ~0x7fffULL;
4252
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
4253
    env->pc = env->tbr;
4254
    env->npc = env->pc + 4;
4255
    env->exception_index = -1;
4256
}
4257
#else
4258
#ifdef DEBUG_PCALL
4259
static const char * const excp_names[0x80] = {
4260
    [TT_TFAULT] = "Instruction Access Fault",
4261
    [TT_ILL_INSN] = "Illegal Instruction",
4262
    [TT_PRIV_INSN] = "Privileged Instruction",
4263
    [TT_NFPU_INSN] = "FPU Disabled",
4264
    [TT_WIN_OVF] = "Window Overflow",
4265
    [TT_WIN_UNF] = "Window Underflow",
4266
    [TT_UNALIGNED] = "Unaligned Memory Access",
4267
    [TT_FP_EXCP] = "FPU Exception",
4268
    [TT_DFAULT] = "Data Access Fault",
4269
    [TT_TOVF] = "Tag Overflow",
4270
    [TT_EXTINT | 0x1] = "External Interrupt 1",
4271
    [TT_EXTINT | 0x2] = "External Interrupt 2",
4272
    [TT_EXTINT | 0x3] = "External Interrupt 3",
4273
    [TT_EXTINT | 0x4] = "External Interrupt 4",
4274
    [TT_EXTINT | 0x5] = "External Interrupt 5",
4275
    [TT_EXTINT | 0x6] = "External Interrupt 6",
4276
    [TT_EXTINT | 0x7] = "External Interrupt 7",
4277
    [TT_EXTINT | 0x8] = "External Interrupt 8",
4278
    [TT_EXTINT | 0x9] = "External Interrupt 9",
4279
    [TT_EXTINT | 0xa] = "External Interrupt 10",
4280
    [TT_EXTINT | 0xb] = "External Interrupt 11",
4281
    [TT_EXTINT | 0xc] = "External Interrupt 12",
4282
    [TT_EXTINT | 0xd] = "External Interrupt 13",
4283
    [TT_EXTINT | 0xe] = "External Interrupt 14",
4284
    [TT_EXTINT | 0xf] = "External Interrupt 15",
4285
    [TT_TOVF] = "Tag Overflow",
4286
    [TT_CODE_ACCESS] = "Instruction Access Error",
4287
    [TT_DATA_ACCESS] = "Data Access Error",
4288
    [TT_DIV_ZERO] = "Division By Zero",
4289
    [TT_NCP_INSN] = "Coprocessor Disabled",
4290
};
4291
#endif
4292

    
4293
void do_interrupt(CPUState *env)
4294
{
4295
    int cwp, intno = env->exception_index;
4296

    
4297
#ifdef DEBUG_PCALL
4298
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
4299
        static int count;
4300
        const char *name;
4301

    
4302
        if (intno < 0 || intno >= 0x100)
4303
            name = "Unknown";
4304
        else if (intno >= 0x80)
4305
            name = "Trap Instruction";
4306
        else {
4307
            name = excp_names[intno];
4308
            if (!name)
4309
                name = "Unknown";
4310
        }
4311

    
4312
        qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
4313
                count, name, intno,
4314
                env->pc,
4315
                env->npc, env->regwptr[6]);
4316
        log_cpu_state(env, 0);
4317
#if 0
4318
        {
4319
            int i;
4320
            uint8_t *ptr;
4321

4322
            qemu_log("       code=");
4323
            ptr = (uint8_t *)env->pc;
4324
            for(i = 0; i < 16; i++) {
4325
                qemu_log(" %02x", ldub(ptr + i));
4326
            }
4327
            qemu_log("\n");
4328
        }
4329
#endif
4330
        count++;
4331
    }
4332
#endif
4333
#if !defined(CONFIG_USER_ONLY)
4334
    if (env->psret == 0) {
4335
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
4336
                  env->exception_index);
4337
        return;
4338
    }
4339
#endif
4340
    env->psret = 0;
4341
    cwp = cwp_dec(env->cwp - 1);
4342
    set_cwp(cwp);
4343
    env->regwptr[9] = env->pc;
4344
    env->regwptr[10] = env->npc;
4345
    env->psrps = env->psrs;
4346
    env->psrs = 1;
4347
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
4348
    env->pc = env->tbr;
4349
    env->npc = env->pc + 4;
4350
    env->exception_index = -1;
4351

    
4352
#if !defined(CONFIG_USER_ONLY)
4353
    /* IRQ acknowledgment */
4354
    if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) {
4355
        env->qemu_irq_ack(env->irq_manager, intno);
4356
    }
4357
#endif
4358
}
4359
#endif
4360

    
4361
#if !defined(CONFIG_USER_ONLY)
4362

    
4363
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
4364
                                void *retaddr);
4365

    
4366
#define MMUSUFFIX _mmu
4367
#define ALIGNED_ONLY
4368

    
4369
#define SHIFT 0
4370
#include "softmmu_template.h"
4371

    
4372
#define SHIFT 1
4373
#include "softmmu_template.h"
4374

    
4375
#define SHIFT 2
4376
#include "softmmu_template.h"
4377

    
4378
#define SHIFT 3
4379
#include "softmmu_template.h"
4380

    
4381
/* XXX: make it generic ? */
4382
static void cpu_restore_state2(void *retaddr)
4383
{
4384
    TranslationBlock *tb;
4385
    unsigned long pc;
4386

    
4387
    if (retaddr) {
4388
        /* now we have a real cpu fault */
4389
        pc = (unsigned long)retaddr;
4390
        tb = tb_find_pc(pc);
4391
        if (tb) {
4392
            /* the PC is inside the translated code. It means that we have
4393
               a virtual CPU fault */
4394
            cpu_restore_state(tb, env, pc);
4395
        }
4396
    }
4397
}
4398

    
4399
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
4400
                                void *retaddr)
4401
{
4402
#ifdef DEBUG_UNALIGNED
4403
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
4404
           "\n", addr, env->pc);
4405
#endif
4406
    cpu_restore_state2(retaddr);
4407
    raise_exception(TT_UNALIGNED);
4408
}
4409

    
4410
/* try to fill the TLB and return an exception if error. If retaddr is
4411
   NULL, it means that the function was called in C code (i.e. not
4412
   from generated code or from helper.c) */
4413
/* XXX: fix it to restore all registers */
4414
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4415
{
4416
    int ret;
4417
    CPUState *saved_env;
4418

    
4419
    /* XXX: hack to restore env in all cases, even if not called from
4420
       generated code */
4421
    saved_env = env;
4422
    env = cpu_single_env;
4423

    
4424
    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4425
    if (ret) {
4426
        cpu_restore_state2(retaddr);
4427
        cpu_loop_exit();
4428
    }
4429
    env = saved_env;
4430
}
4431

    
4432
#endif /* !CONFIG_USER_ONLY */
4433

    
4434
#ifndef TARGET_SPARC64
4435
#if !defined(CONFIG_USER_ONLY)
4436
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
4437
                          int is_asi, int size)
4438
{
4439
    CPUState *saved_env;
4440
    int fault_type;
4441

    
4442
    /* XXX: hack to restore env in all cases, even if not called from
4443
       generated code */
4444
    saved_env = env;
4445
    env = cpu_single_env;
4446
#ifdef DEBUG_UNASSIGNED
4447
    if (is_asi)
4448
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
4449
               " asi 0x%02x from " TARGET_FMT_lx "\n",
4450
               is_exec ? "exec" : is_write ? "write" : "read", size,
4451
               size == 1 ? "" : "s", addr, is_asi, env->pc);
4452
    else
4453
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
4454
               " from " TARGET_FMT_lx "\n",
4455
               is_exec ? "exec" : is_write ? "write" : "read", size,
4456
               size == 1 ? "" : "s", addr, env->pc);
4457
#endif
4458
    /* Don't overwrite translation and access faults */
4459
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
4460
    if ((fault_type > 4) || (fault_type == 0)) {
4461
        env->mmuregs[3] = 0; /* Fault status register */
4462
        if (is_asi)
4463
            env->mmuregs[3] |= 1 << 16;
4464
        if (env->psrs)
4465
            env->mmuregs[3] |= 1 << 5;
4466
        if (is_exec)
4467
            env->mmuregs[3] |= 1 << 6;
4468
        if (is_write)
4469
            env->mmuregs[3] |= 1 << 7;
4470
        env->mmuregs[3] |= (5 << 2) | 2;
4471
        /* SuperSPARC will never place instruction fault addresses in the FAR */
4472
        if (!is_exec) {
4473
            env->mmuregs[4] = addr; /* Fault address register */
4474
        }
4475
    }
4476
    /* overflow (same type fault was not read before another fault) */
4477
    if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
4478
        env->mmuregs[3] |= 1;
4479
    }
4480

    
4481
    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
4482
        if (is_exec)
4483
            raise_exception(TT_CODE_ACCESS);
4484
        else
4485
            raise_exception(TT_DATA_ACCESS);
4486
    }
4487

    
4488
    /* flush neverland mappings created during no-fault mode,
4489
       so the sequential MMU faults report proper fault types */
4490
    if (env->mmuregs[0] & MMU_NF) {
4491
        tlb_flush(env, 1);
4492
    }
4493

    
4494
    env = saved_env;
4495
}
4496
#endif
4497
#else
4498
#if defined(CONFIG_USER_ONLY)
4499
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
4500
                          int is_asi, int size)
4501
#else
4502
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
4503
                          int is_asi, int size)
4504
#endif
4505
{
4506
    CPUState *saved_env;
4507

    
4508
    /* XXX: hack to restore env in all cases, even if not called from
4509
       generated code */
4510
    saved_env = env;
4511
    env = cpu_single_env;
4512

    
4513
#ifdef DEBUG_UNASSIGNED
4514
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
4515
           "\n", addr, env->pc);
4516
#endif
4517

    
4518
    if (is_exec)
4519
        raise_exception(TT_CODE_ACCESS);
4520
    else
4521
        raise_exception(TT_DATA_ACCESS);
4522

    
4523
    env = saved_env;
4524
}
4525
#endif
4526

    
4527

    
4528
#ifdef TARGET_SPARC64
4529
void helper_tick_set_count(void *opaque, uint64_t count)
4530
{
4531
#if !defined(CONFIG_USER_ONLY)
4532
    cpu_tick_set_count(opaque, count);
4533
#endif
4534
}
4535

    
4536
uint64_t helper_tick_get_count(void *opaque)
4537
{
4538
#if !defined(CONFIG_USER_ONLY)
4539
    return cpu_tick_get_count(opaque);
4540
#else
4541
    return 0;
4542
#endif
4543
}
4544

    
4545
void helper_tick_set_limit(void *opaque, uint64_t limit)
4546
{
4547
#if !defined(CONFIG_USER_ONLY)
4548
    cpu_tick_set_limit(opaque, limit);
4549
#endif
4550
}
4551
#endif