/* target-sparc/op_helper.c @ 0e2fa9ca */

#include "exec.h"
#include "host-utils.h"
#include "helper.h"

//#define DEBUG_MMU
//#define DEBUG_MXCC
//#define DEBUG_UNALIGNED
//#define DEBUG_UNASSIGNED
//#define DEBUG_ASI
//#define DEBUG_PCALL
//#define DEBUG_PSTATE

#ifdef DEBUG_MMU
#define DPRINTF_MMU(fmt, ...)                                   \
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MMU(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_MXCC
#define DPRINTF_MXCC(fmt, ...)                                  \
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...)                                   \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#endif

#ifdef DEBUG_PSTATE
#define DPRINTF_PSTATE(fmt, ...)                                   \
    do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_PSTATE(fmt, ...) do {} while (0)
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
#define AM_CHECK(env1) (1)
#endif
#endif

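/* DT0/DT1 and QT0/QT1 are shorthands for the per-CPU scratch values that
   the double- and quad-precision helpers below use as implicit operands
   and results. */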
#define DT0 (env->dt0)
#define DT1 (env->dt1)
#define QT0 (env->qt0)
#define QT1 (env->qt1)

#if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                          int is_asi, int size);
#endif

#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
// Calculates TSB pointer value for fault page size 8k or 64k
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
    int tsb_size  = tsb_register & 0xf;

    // discard lower 13 bits which hold tag access context
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;

    // now reorder bits
    uint64_t tsb_base_mask = ~0x1fffULL;
    uint64_t va = tag_access_va;

    // move va bits to correct position
    if (page_size == 8*1024) {
        va >>= 9;
    } else if (page_size == 64*1024) {
        va >>= 12;
    }

    if (tsb_size) {
        tsb_base_mask <<= tsb_size;
    }

    // calculate tsb_base mask and adjust va if split is in use
    if (tsb_split) {
        if (page_size == 8*1024) {
            va &= ~(1ULL << (13 + tsb_size));
        } else if (page_size == 64*1024) {
            va |= (1ULL << (13 + tsb_size));
        }
        tsb_base_mask <<= 1;
    }

    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}
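/* Illustrative note (not in the original source): with tsb_split == 0,
   tsb_size == n and an 8k fault page, the value returned above is the TSB
   base with its low (13 + n) bits replaced by VA[21 + n:13] << 4, i.e. one
   16-byte TTE slot per 8k page in a 2^(13 + n)-byte table. */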

// Calculates tag target register value by reordering bits
// in tag access register
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
}
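/* The 13-bit context (tag access bits 12:0) ends up in bits 60:48 and
   VA[63:22] in the low bits, which appears to match the UltraSPARC I/D tag
   target register layout. */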

static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUState *env1)
{
    target_ulong mask, size, va, offset;

    // flush page range if translation is valid
    if (TTE_IS_VALID(tlb->tte)) {

        mask = 0xffffffffffffe000ULL;
        mask <<= 3 * ((tlb->tte >> 61) & 3);
        size = ~mask + 1;

        va = tlb->tag & mask;

        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
            tlb_flush_page(env1, va + offset);
        }
    }

    tlb->tag = tlb_tag;
    tlb->tte = tlb_tte;
}

static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char* strmmu, CPUState *env1)
{
    unsigned int i;
    target_ulong mask;
    uint64_t context;

    int is_demap_context = (demap_addr >> 6) & 1;

    // demap context
    switch ((demap_addr >> 4) & 3) {
    case 0: // primary
        context = env1->dmmu.mmu_primary_context;
        break;
    case 1: // secondary
        context = env1->dmmu.mmu_secondary_context;
        break;
    case 2: // nucleus
        context = 0;
        break;
    case 3: // reserved
    default:
        return;
    }

    for (i = 0; i < 64; i++) {
        if (TTE_IS_VALID(tlb[i].tte)) {

            if (is_demap_context) {
                // will remove non-global entries matching context value
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            } else {
                // demap page
                // will remove any entry matching VA
                mask = 0xffffffffffffe000ULL;
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);

                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
                    continue;
                }

                // entry should be global or matching context value
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            }

            replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
            dump_mmu(env1);
#endif
        }
    }
}

static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char* strmmu, CPUState *env1)
{
    unsigned int i, replace_used;

    // Try replacing invalid entry
    for (i = 0; i < 64; i++) {
        if (!TTE_IS_VALID(tlb[i].tte)) {
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
            dump_mmu(env1);
#endif
            return;
        }
    }

    // All entries are valid, try replacing unlocked entry

    for (replace_used = 0; replace_used < 2; ++replace_used) {

        // Used entries are not replaced on first pass

        for (i = 0; i < 64; i++) {
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {

                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
                            strmmu, (replace_used?"used":"unused"), i);
                dump_mmu(env1);
#endif
                return;
            }
        }

        // Now reset used bit and search for unused entries again

        for (i = 0; i < 64; i++) {
            TTE_SET_UNUSED(tlb[i].tte);
        }
    }

#ifdef DEBUG_MMU
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
#endif
    // error state?
}

#endif

static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(env1))
        addr &= 0xffffffffULL;
#endif
    return addr;
}

/* returns true if access using this ASI is to have address translated by MMU
   otherwise access is to raw physical address */
static inline int is_translating_asi(int asi)
{
#ifdef TARGET_SPARC64
    /* Ultrasparc IIi translating asi
       - note this list is defined by cpu implementation
     */
    switch (asi) {
    case 0x04 ... 0x11:
    case 0x18 ... 0x19:
    case 0x24 ... 0x2C:
    case 0x70 ... 0x73:
    case 0x78 ... 0x79:
    case 0x80 ... 0xFF:
        return 1;

    default:
        return 0;
    }
#else
    /* TODO: check sparc32 bits */
    return 0;
#endif
}

static inline target_ulong asi_address_mask(CPUState *env1,
                                            int asi, target_ulong addr)
{
    if (is_translating_asi(asi)) {
        return address_mask(env, addr);
    } else {
        return addr;
    }
}
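/* Translating ASIs honour the PSTATE.AM 32-bit address mask applied above;
   bypass (physical) ASIs are passed through unmasked. */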

static void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit();
}

void HELPER(raise_exception)(int tt)
{
    raise_exception(tt);
}

void helper_check_align(target_ulong addr, uint32_t align)
{
    if (addr & align) {
#ifdef DEBUG_UNALIGNED
        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
               "\n", addr, env->pc);
#endif
        raise_exception(TT_UNALIGNED);
    }
}
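/* "align" is a mask of low address bits that must be clear: 1 for halfword,
   3 for word, 7 for doubleword accesses (the ASI helpers below pass
   size - 1). */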

#define F_HELPER(name, p) void helper_f##name##p(void)

#define F_BINOP(name)                                           \
    float32 helper_f ## name ## s (float32 src1, float32 src2)  \
    {                                                           \
        return float32_ ## name (src1, src2, &env->fp_status);  \
    }                                                           \
    F_HELPER(name, d)                                           \
    {                                                           \
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);     \
    }                                                           \
    F_HELPER(name, q)                                           \
    {                                                           \
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);    \
    }

F_BINOP(add);
F_BINOP(sub);
F_BINOP(mul);
F_BINOP(div);
#undef F_BINOP
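/* For reference, F_BINOP(add) expands to roughly:
 *
 *   float32 helper_fadds(float32 src1, float32 src2)
 *   { return float32_add(src1, src2, &env->fp_status); }
 *   void helper_faddd(void) { DT0 = float64_add(DT0, DT1, &env->fp_status); }
 *   void helper_faddq(void) { QT0 = float128_add(QT0, QT1, &env->fp_status); }
 */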

void helper_fsmuld(float32 src1, float32 src2)
{
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
                      float32_to_float64(src2, &env->fp_status),
                      &env->fp_status);
}

void helper_fdmulq(void)
{
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
                       float64_to_float128(DT1, &env->fp_status),
                       &env->fp_status);
}

float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
F_HELPER(neg, d)
{
    DT0 = float64_chs(DT1);
}

F_HELPER(neg, q)
{
    QT0 = float128_chs(QT1);
}
#endif

/* Integer to float conversion.  */
float32 helper_fitos(int32_t src)
{
    return int32_to_float32(src, &env->fp_status);
}

void helper_fitod(int32_t src)
{
    DT0 = int32_to_float64(src, &env->fp_status);
}

void helper_fitoq(int32_t src)
{
    QT0 = int32_to_float128(src, &env->fp_status);
}

#ifdef TARGET_SPARC64
float32 helper_fxtos(void)
{
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, d)
{
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, q)
{
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
#undef F_HELPER

/* floating point conversion */
float32 helper_fdtos(void)
{
    return float64_to_float32(DT1, &env->fp_status);
}

void helper_fstod(float32 src)
{
    DT0 = float32_to_float64(src, &env->fp_status);
}

float32 helper_fqtos(void)
{
    return float128_to_float32(QT1, &env->fp_status);
}

void helper_fstoq(float32 src)
{
    QT0 = float32_to_float128(src, &env->fp_status);
}

void helper_fqtod(void)
{
    DT0 = float128_to_float64(QT1, &env->fp_status);
}

void helper_fdtoq(void)
{
    QT0 = float64_to_float128(DT1, &env->fp_status);
}

/* Float to integer conversion.  */
int32_t helper_fstoi(float32 src)
{
    return float32_to_int32_round_to_zero(src, &env->fp_status);
}

int32_t helper_fdtoi(void)
{
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
}

int32_t helper_fqtoi(void)
{
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
}

#ifdef TARGET_SPARC64
void helper_fstox(float32 src)
{
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
}

void helper_fdtox(void)
{
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}

void helper_fqtox(void)
{
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}

void helper_faligndata(void)
{
    uint64_t tmp;

    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
    /* on many architectures a shift of 64 does nothing */
    if ((env->gsr & 7) != 0) {
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
    }
    *((uint64_t *)&DT0) = tmp;
}
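/* Conceptually this extracts 8 bytes of the 16-byte concatenation DT0:DT1
   starting at the byte offset held in GSR.align (GSR bits 2:0); the guard on
   the second shift avoids an undefined 64-bit shift when the offset is 0. */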

#ifdef HOST_WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif
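/* These accessors give the VIS helpers a fixed element numbering inside the
   vis64/vis32 unions below regardless of host byte order. */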

typedef union {
    uint8_t b[8];
    uint16_t w[4];
    int16_t sw[4];
    uint32_t l[2];
    float64 d;
} vis64;

typedef union {
    uint8_t b[4];
    uint16_t w[2];
    uint32_t l;
    float32 f;
} vis32;

void helper_fpmerge(void)
502
{
503
    vis64 s, d;
504

    
505
    s.d = DT0;
506
    d.d = DT1;
507

    
508
    // Reverse calculation order to handle overlap
509
    d.VIS_B64(7) = s.VIS_B64(3);
510
    d.VIS_B64(6) = d.VIS_B64(3);
511
    d.VIS_B64(5) = s.VIS_B64(2);
512
    d.VIS_B64(4) = d.VIS_B64(2);
513
    d.VIS_B64(3) = s.VIS_B64(1);
514
    d.VIS_B64(2) = d.VIS_B64(1);
515
    d.VIS_B64(1) = s.VIS_B64(0);
516
    //d.VIS_B64(0) = d.VIS_B64(0);
517

    
518
    DT0 = d.d;
519
}
520

    
521
void helper_fmul8x16(void)
522
{
523
    vis64 s, d;
524
    uint32_t tmp;
525

    
526
    s.d = DT0;
527
    d.d = DT1;
528

    
529
#define PMUL(r)                                                 \
530
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
531
    if ((tmp & 0xff) > 0x7f)                                    \
532
        tmp += 0x100;                                           \
533
    d.VIS_W64(r) = tmp >> 8;
534

    
535
    PMUL(0);
536
    PMUL(1);
537
    PMUL(2);
538
    PMUL(3);
539
#undef PMUL
540

    
541
    DT0 = d.d;
542
}
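/* In the fmul8x16 family each 16-bit result is the product of a signed
   16-bit element and an 8-bit pixel element, rounded to nearest (the
   "+ 0x100 when the low byte exceeds 0x7f" step) before the low byte is
   discarded. */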
543

    
544
void helper_fmul8x16al(void)
545
{
546
    vis64 s, d;
547
    uint32_t tmp;
548

    
549
    s.d = DT0;
550
    d.d = DT1;
551

    
552
#define PMUL(r)                                                 \
553
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
554
    if ((tmp & 0xff) > 0x7f)                                    \
555
        tmp += 0x100;                                           \
556
    d.VIS_W64(r) = tmp >> 8;
557

    
558
    PMUL(0);
559
    PMUL(1);
560
    PMUL(2);
561
    PMUL(3);
562
#undef PMUL
563

    
564
    DT0 = d.d;
565
}
566

    
567
void helper_fmul8x16au(void)
568
{
569
    vis64 s, d;
570
    uint32_t tmp;
571

    
572
    s.d = DT0;
573
    d.d = DT1;
574

    
575
#define PMUL(r)                                                 \
576
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
577
    if ((tmp & 0xff) > 0x7f)                                    \
578
        tmp += 0x100;                                           \
579
    d.VIS_W64(r) = tmp >> 8;
580

    
581
    PMUL(0);
582
    PMUL(1);
583
    PMUL(2);
584
    PMUL(3);
585
#undef PMUL
586

    
587
    DT0 = d.d;
588
}
589

    
590
void helper_fmul8sux16(void)
591
{
592
    vis64 s, d;
593
    uint32_t tmp;
594

    
595
    s.d = DT0;
596
    d.d = DT1;
597

    
598
#define PMUL(r)                                                         \
599
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
600
    if ((tmp & 0xff) > 0x7f)                                            \
601
        tmp += 0x100;                                                   \
602
    d.VIS_W64(r) = tmp >> 8;
603

    
604
    PMUL(0);
605
    PMUL(1);
606
    PMUL(2);
607
    PMUL(3);
608
#undef PMUL
609

    
610
    DT0 = d.d;
611
}
612

    
613
void helper_fmul8ulx16(void)
614
{
615
    vis64 s, d;
616
    uint32_t tmp;
617

    
618
    s.d = DT0;
619
    d.d = DT1;
620

    
621
#define PMUL(r)                                                         \
622
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
623
    if ((tmp & 0xff) > 0x7f)                                            \
624
        tmp += 0x100;                                                   \
625
    d.VIS_W64(r) = tmp >> 8;
626

    
627
    PMUL(0);
628
    PMUL(1);
629
    PMUL(2);
630
    PMUL(3);
631
#undef PMUL
632

    
633
    DT0 = d.d;
634
}
635

    
636
void helper_fmuld8sux16(void)
637
{
638
    vis64 s, d;
639
    uint32_t tmp;
640

    
641
    s.d = DT0;
642
    d.d = DT1;
643

    
644
#define PMUL(r)                                                         \
645
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
646
    if ((tmp & 0xff) > 0x7f)                                            \
647
        tmp += 0x100;                                                   \
648
    d.VIS_L64(r) = tmp;
649

    
650
    // Reverse calculation order to handle overlap
651
    PMUL(1);
652
    PMUL(0);
653
#undef PMUL
654

    
655
    DT0 = d.d;
656
}
657

    
658
void helper_fmuld8ulx16(void)
659
{
660
    vis64 s, d;
661
    uint32_t tmp;
662

    
663
    s.d = DT0;
664
    d.d = DT1;
665

    
666
#define PMUL(r)                                                         \
667
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
668
    if ((tmp & 0xff) > 0x7f)                                            \
669
        tmp += 0x100;                                                   \
670
    d.VIS_L64(r) = tmp;
671

    
672
    // Reverse calculation order to handle overlap
673
    PMUL(1);
674
    PMUL(0);
675
#undef PMUL
676

    
677
    DT0 = d.d;
678
}
679

    
680
void helper_fexpand(void)
681
{
682
    vis32 s;
683
    vis64 d;
684

    
685
    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
686
    d.d = DT1;
687
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
688
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
689
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
690
    d.VIS_W64(3) = s.VIS_B32(3) << 4;
691

    
692
    DT0 = d.d;
693
}
694

    
695
#define VIS_HELPER(name, F)                             \
696
    void name##16(void)                                 \
697
    {                                                   \
698
        vis64 s, d;                                     \
699
                                                        \
700
        s.d = DT0;                                      \
701
        d.d = DT1;                                      \
702
                                                        \
703
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));   \
704
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));   \
705
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));   \
706
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));   \
707
                                                        \
708
        DT0 = d.d;                                      \
709
    }                                                   \
710
                                                        \
711
    uint32_t name##16s(uint32_t src1, uint32_t src2)    \
712
    {                                                   \
713
        vis32 s, d;                                     \
714
                                                        \
715
        s.l = src1;                                     \
716
        d.l = src2;                                     \
717
                                                        \
718
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));   \
719
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));   \
720
                                                        \
721
        return d.l;                                     \
722
    }                                                   \
723
                                                        \
724
    void name##32(void)                                 \
725
    {                                                   \
726
        vis64 s, d;                                     \
727
                                                        \
728
        s.d = DT0;                                      \
729
        d.d = DT1;                                      \
730
                                                        \
731
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));   \
732
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));   \
733
                                                        \
734
        DT0 = d.d;                                      \
735
    }                                                   \
736
                                                        \
737
    uint32_t name##32s(uint32_t src1, uint32_t src2)    \
738
    {                                                   \
739
        vis32 s, d;                                     \
740
                                                        \
741
        s.l = src1;                                     \
742
        d.l = src2;                                     \
743
                                                        \
744
        d.l = F(d.l, s.l);                              \
745
                                                        \
746
        return d.l;                                     \
747
    }
748

    
749
#define FADD(a, b) ((a) + (b))
750
#define FSUB(a, b) ((a) - (b))
751
VIS_HELPER(helper_fpadd, FADD)
752
VIS_HELPER(helper_fpsub, FSUB)
753

    
754
#define VIS_CMPHELPER(name, F)                                        \
755
    void name##16(void)                                           \
756
    {                                                             \
757
        vis64 s, d;                                               \
758
                                                                  \
759
        s.d = DT0;                                                \
760
        d.d = DT1;                                                \
761
                                                                  \
762
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0;       \
763
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0;      \
764
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0;      \
765
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0;      \
766
                                                                  \
767
        DT0 = d.d;                                                \
768
    }                                                             \
769
                                                                  \
770
    void name##32(void)                                           \
771
    {                                                             \
772
        vis64 s, d;                                               \
773
                                                                  \
774
        s.d = DT0;                                                \
775
        d.d = DT1;                                                \
776
                                                                  \
777
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0;       \
778
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0;      \
779
                                                                  \
780
        DT0 = d.d;                                                \
781
    }
782

    
783
#define FCMPGT(a, b) ((a) > (b))
784
#define FCMPEQ(a, b) ((a) == (b))
785
#define FCMPLE(a, b) ((a) <= (b))
786
#define FCMPNE(a, b) ((a) != (b))
787

    
788
VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
789
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
790
VIS_CMPHELPER(helper_fcmple, FCMPLE)
791
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
792
#endif
793

    
794
void helper_check_ieee_exceptions(void)
795
{
796
    target_ulong status;
797

    
798
    status = get_float_exception_flags(&env->fp_status);
799
    if (status) {
800
        /* Copy IEEE 754 flags into FSR */
801
        if (status & float_flag_invalid)
802
            env->fsr |= FSR_NVC;
803
        if (status & float_flag_overflow)
804
            env->fsr |= FSR_OFC;
805
        if (status & float_flag_underflow)
806
            env->fsr |= FSR_UFC;
807
        if (status & float_flag_divbyzero)
808
            env->fsr |= FSR_DZC;
809
        if (status & float_flag_inexact)
810
            env->fsr |= FSR_NXC;
811

    
812
        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
813
            /* Unmasked exception, generate a trap */
814
            env->fsr |= FSR_FTT_IEEE_EXCP;
815
            raise_exception(TT_FP_EXCP);
816
        } else {
817
            /* Accumulate exceptions */
818
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
819
        }
820
    }
821
}
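/* Newly raised IEEE flags are latched into the FSR current-exception (cexc)
   field; if a matching trap-enable (TEM) bit is set a TT_FP_EXCP trap is
   raised, otherwise the flags are also accumulated into aexc (the << 5
   shift). */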
822

    
823
void helper_clear_float_exceptions(void)
824
{
825
    set_float_exception_flags(0, &env->fp_status);
826
}
827

    
828
float32 helper_fabss(float32 src)
829
{
830
    return float32_abs(src);
831
}
832

    
833
#ifdef TARGET_SPARC64
834
void helper_fabsd(void)
835
{
836
    DT0 = float64_abs(DT1);
837
}
838

    
839
void helper_fabsq(void)
840
{
841
    QT0 = float128_abs(QT1);
842
}
843
#endif
844

    
845
float32 helper_fsqrts(float32 src)
846
{
847
    return float32_sqrt(src, &env->fp_status);
848
}
849

    
850
void helper_fsqrtd(void)
851
{
852
    DT0 = float64_sqrt(DT1, &env->fp_status);
853
}
854

    
855
void helper_fsqrtq(void)
856
{
857
    QT0 = float128_sqrt(QT1, &env->fp_status);
858
}
859

    
860
#define GEN_FCMP(name, size, reg1, reg2, FS, TRAP)                      \
861
    void glue(helper_, name) (void)                                     \
862
    {                                                                   \
863
        target_ulong new_fsr;                                           \
864
                                                                        \
865
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                     \
866
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) {   \
867
        case float_relation_unordered:                                  \
868
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                      \
869
            if ((env->fsr & FSR_NVM) || TRAP) {                         \
870
                env->fsr |= new_fsr;                                    \
871
                env->fsr |= FSR_NVC;                                    \
872
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
873
                raise_exception(TT_FP_EXCP);                            \
874
            } else {                                                    \
875
                env->fsr |= FSR_NVA;                                    \
876
            }                                                           \
877
            break;                                                      \
878
        case float_relation_less:                                       \
879
            new_fsr = FSR_FCC0 << FS;                                   \
880
            break;                                                      \
881
        case float_relation_greater:                                    \
882
            new_fsr = FSR_FCC1 << FS;                                   \
883
            break;                                                      \
884
        default:                                                        \
885
            new_fsr = 0;                                                \
886
            break;                                                      \
887
        }                                                               \
888
        env->fsr |= new_fsr;                                            \
889
    }
890
#define GEN_FCMPS(name, size, FS, TRAP)                                 \
891
    void glue(helper_, name)(float32 src1, float32 src2)                \
892
    {                                                                   \
893
        target_ulong new_fsr;                                           \
894
                                                                        \
895
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                     \
896
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) {   \
897
        case float_relation_unordered:                                  \
898
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                      \
899
            if ((env->fsr & FSR_NVM) || TRAP) {                         \
900
                env->fsr |= new_fsr;                                    \
901
                env->fsr |= FSR_NVC;                                    \
902
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
903
                raise_exception(TT_FP_EXCP);                            \
904
            } else {                                                    \
905
                env->fsr |= FSR_NVA;                                    \
906
            }                                                           \
907
            break;                                                      \
908
        case float_relation_less:                                       \
909
            new_fsr = FSR_FCC0 << FS;                                   \
910
            break;                                                      \
911
        case float_relation_greater:                                    \
912
            new_fsr = FSR_FCC1 << FS;                                   \
913
            break;                                                      \
914
        default:                                                        \
915
            new_fsr = 0;                                                \
916
            break;                                                      \
917
        }                                                               \
918
        env->fsr |= new_fsr;                                            \
919
    }
920

    
921
GEN_FCMPS(fcmps, float32, 0, 0);
922
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
923

    
924
GEN_FCMPS(fcmpes, float32, 0, 1);
925
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
926

    
927
GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
928
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
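/* In GEN_FCMP/GEN_FCMPS, FS is the bit position of the target fcc field in
   the FSR (0 here for fcc0; 22, 24 and 26 are used below for fcc1-fcc3) and
   TRAP selects the signalling fcmpe* variants, which trap on unordered
   operands even when the NVM trap enable is clear. */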
929

    
930
static uint32_t compute_all_flags(void)
931
{
932
    return env->psr & PSR_ICC;
933
}
934

    
935
static uint32_t compute_C_flags(void)
936
{
937
    return env->psr & PSR_CARRY;
938
}
939

    
940
static inline uint32_t get_NZ_icc(int32_t dst)
941
{
942
    uint32_t ret = 0;
943

    
944
    if (dst == 0) {
945
        ret = PSR_ZERO;
946
    } else if (dst < 0) {
947
        ret = PSR_NEG;
948
    }
949
    return ret;
950
}
951

    
952
#ifdef TARGET_SPARC64
953
static uint32_t compute_all_flags_xcc(void)
954
{
955
    return env->xcc & PSR_ICC;
956
}
957

    
958
static uint32_t compute_C_flags_xcc(void)
959
{
960
    return env->xcc & PSR_CARRY;
961
}
962

    
963
static inline uint32_t get_NZ_xcc(target_long dst)
964
{
965
    uint32_t ret = 0;
966

    
967
    if (!dst) {
968
        ret = PSR_ZERO;
969
    } else if (dst < 0) {
970
        ret = PSR_NEG;
971
    }
972
    return ret;
973
}
974
#endif
975

    
976
static inline uint32_t get_V_div_icc(target_ulong src2)
977
{
978
    uint32_t ret = 0;
979

    
980
    if (src2 != 0) {
981
        ret = PSR_OVF;
982
    }
983
    return ret;
984
}
985

    
986
static uint32_t compute_all_div(void)
987
{
988
    uint32_t ret;
989

    
990
    ret = get_NZ_icc(CC_DST);
991
    ret |= get_V_div_icc(CC_SRC2);
992
    return ret;
993
}
994

    
995
static uint32_t compute_C_div(void)
996
{
997
    return 0;
998
}
999

    
1000
static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
1001
{
1002
    uint32_t ret = 0;
1003

    
1004
    if (dst < src1) {
1005
        ret = PSR_CARRY;
1006
    }
1007
    return ret;
1008
}
1009

    
1010
static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
1011
                                      uint32_t src2)
1012
{
1013
    uint32_t ret = 0;
1014

    
1015
    if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
1016
        ret = PSR_CARRY;
1017
    }
1018
    return ret;
1019
}
1020

    
1021
static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
1022
                                     uint32_t src2)
1023
{
1024
    uint32_t ret = 0;
1025

    
1026
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
1027
        ret = PSR_OVF;
1028
    }
1029
    return ret;
1030
}
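/* Standard two's-complement flag derivations: the ADDX carry is the carry
   out of bit 31, (src1 & src2) | (~dst & (src1 | src2)), and signed overflow
   is flagged when both operands share a sign that differs from the sign of
   the result. */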
1031

    
1032
#ifdef TARGET_SPARC64
1033
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
1034
{
1035
    uint32_t ret = 0;
1036

    
1037
    if (dst < src1) {
1038
        ret = PSR_CARRY;
1039
    }
1040
    return ret;
1041
}
1042

    
1043
static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
1044
                                      target_ulong src2)
1045
{
1046
    uint32_t ret = 0;
1047

    
1048
    if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
1049
        ret = PSR_CARRY;
1050
    }
1051
    return ret;
1052
}
1053

    
1054
static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
1055
                                         target_ulong src2)
1056
{
1057
    uint32_t ret = 0;
1058

    
1059
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
1060
        ret = PSR_OVF;
1061
    }
1062
    return ret;
1063
}
1064

    
1065
static uint32_t compute_all_add_xcc(void)
1066
{
1067
    uint32_t ret;
1068

    
1069
    ret = get_NZ_xcc(CC_DST);
1070
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
1071
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
1072
    return ret;
1073
}
1074

    
1075
static uint32_t compute_C_add_xcc(void)
1076
{
1077
    return get_C_add_xcc(CC_DST, CC_SRC);
1078
}
1079
#endif
1080

    
1081
static uint32_t compute_all_add(void)
1082
{
1083
    uint32_t ret;
1084

    
1085
    ret = get_NZ_icc(CC_DST);
1086
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1087
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1088
    return ret;
1089
}
1090

    
1091
static uint32_t compute_C_add(void)
1092
{
1093
    return get_C_add_icc(CC_DST, CC_SRC);
1094
}
1095

    
1096
#ifdef TARGET_SPARC64
1097
static uint32_t compute_all_addx_xcc(void)
1098
{
1099
    uint32_t ret;
1100

    
1101
    ret = get_NZ_xcc(CC_DST);
1102
    ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
1103
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
1104
    return ret;
1105
}
1106

    
1107
static uint32_t compute_C_addx_xcc(void)
1108
{
1109
    uint32_t ret;
1110

    
1111
    ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
1112
    return ret;
1113
}
1114
#endif
1115

    
1116
static uint32_t compute_all_addx(void)
1117
{
1118
    uint32_t ret;
1119

    
1120
    ret = get_NZ_icc(CC_DST);
1121
    ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1122
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1123
    return ret;
1124
}
1125

    
1126
static uint32_t compute_C_addx(void)
1127
{
1128
    uint32_t ret;
1129

    
1130
    ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1131
    return ret;
1132
}
1133

    
1134
static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
1135
{
1136
    uint32_t ret = 0;
1137

    
1138
    if ((src1 | src2) & 0x3) {
1139
        ret = PSR_OVF;
1140
    }
1141
    return ret;
1142
}
1143

    
1144
static uint32_t compute_all_tadd(void)
1145
{
1146
    uint32_t ret;
1147

    
1148
    ret = get_NZ_icc(CC_DST);
1149
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1150
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1151
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1152
    return ret;
1153
}
1154

    
1155
static uint32_t compute_all_taddtv(void)
1156
{
1157
    uint32_t ret;
1158

    
1159
    ret = get_NZ_icc(CC_DST);
1160
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1161
    return ret;
1162
}
1163

    
1164
static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
1165
{
1166
    uint32_t ret = 0;
1167

    
1168
    if (src1 < src2) {
1169
        ret = PSR_CARRY;
1170
    }
1171
    return ret;
1172
}
1173

    
1174
static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
1175
                                      uint32_t src2)
1176
{
1177
    uint32_t ret = 0;
1178

    
1179
    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
1180
        ret = PSR_CARRY;
1181
    }
1182
    return ret;
1183
}
1184

    
1185
static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
1186
                                     uint32_t src2)
1187
{
1188
    uint32_t ret = 0;
1189

    
1190
    if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
1191
        ret = PSR_OVF;
1192
    }
1193
    return ret;
1194
}
1195

    
1196

    
1197
#ifdef TARGET_SPARC64
1198
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
1199
{
1200
    uint32_t ret = 0;
1201

    
1202
    if (src1 < src2) {
1203
        ret = PSR_CARRY;
1204
    }
1205
    return ret;
1206
}
1207

    
1208
static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
1209
                                      target_ulong src2)
1210
{
1211
    uint32_t ret = 0;
1212

    
1213
    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
1214
        ret = PSR_CARRY;
1215
    }
1216
    return ret;
1217
}
1218

    
1219
static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
1220
                                     target_ulong src2)
1221
{
1222
    uint32_t ret = 0;
1223

    
1224
    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
1225
        ret = PSR_OVF;
1226
    }
1227
    return ret;
1228
}
1229

    
1230
static uint32_t compute_all_sub_xcc(void)
1231
{
1232
    uint32_t ret;
1233

    
1234
    ret = get_NZ_xcc(CC_DST);
1235
    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
1236
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1237
    return ret;
1238
}
1239

    
1240
static uint32_t compute_C_sub_xcc(void)
1241
{
1242
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
1243
}
1244
#endif
1245

    
1246
static uint32_t compute_all_sub(void)
1247
{
1248
    uint32_t ret;
1249

    
1250
    ret = get_NZ_icc(CC_DST);
1251
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1252
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1253
    return ret;
1254
}
1255

    
1256
static uint32_t compute_C_sub(void)
1257
{
1258
    return get_C_sub_icc(CC_SRC, CC_SRC2);
1259
}
1260

    
1261
#ifdef TARGET_SPARC64
1262
static uint32_t compute_all_subx_xcc(void)
1263
{
1264
    uint32_t ret;
1265

    
1266
    ret = get_NZ_xcc(CC_DST);
1267
    ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
1268
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1269
    return ret;
1270
}
1271

    
1272
static uint32_t compute_C_subx_xcc(void)
1273
{
1274
    uint32_t ret;
1275

    
1276
    ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
1277
    return ret;
1278
}
1279
#endif
1280

    
1281
static uint32_t compute_all_subx(void)
1282
{
1283
    uint32_t ret;
1284

    
1285
    ret = get_NZ_icc(CC_DST);
1286
    ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1287
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1288
    return ret;
1289
}
1290

    
1291
static uint32_t compute_C_subx(void)
1292
{
1293
    uint32_t ret;
1294

    
1295
    ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1296
    return ret;
1297
}
1298

    
1299
static uint32_t compute_all_tsub(void)
1300
{
1301
    uint32_t ret;
1302

    
1303
    ret = get_NZ_icc(CC_DST);
1304
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1305
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1306
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1307
    return ret;
1308
}
1309

    
1310
static uint32_t compute_all_tsubtv(void)
1311
{
1312
    uint32_t ret;
1313

    
1314
    ret = get_NZ_icc(CC_DST);
1315
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1316
    return ret;
1317
}
1318

    
1319
static uint32_t compute_all_logic(void)
1320
{
1321
    return get_NZ_icc(CC_DST);
1322
}
1323

    
1324
static uint32_t compute_C_logic(void)
1325
{
1326
    return 0;
1327
}
1328

    
1329
#ifdef TARGET_SPARC64
1330
static uint32_t compute_all_logic_xcc(void)
1331
{
1332
    return get_NZ_xcc(CC_DST);
1333
}
1334
#endif
1335

    
1336
typedef struct CCTable {
1337
    uint32_t (*compute_all)(void); /* return all the flags */
1338
    uint32_t (*compute_c)(void);  /* return the C flag */
1339
} CCTable;
1340

    
1341
static const CCTable icc_table[CC_OP_NB] = {
1342
    /* CC_OP_DYNAMIC should never happen */
1343
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
1344
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
1345
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
1346
    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
1347
    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
1348
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
1349
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
1350
    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
1351
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
1352
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
1353
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
1354
};
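/* Condition codes are evaluated lazily: translated code records the last
   flag-setting operation in CC_OP together with CC_DST/CC_SRC/CC_SRC2, and
   these tables recompute the full flag set (or just the carry) on demand. */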
1355

    
1356
#ifdef TARGET_SPARC64
1357
static const CCTable xcc_table[CC_OP_NB] = {
1358
    /* CC_OP_DYNAMIC should never happen */
1359
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
1360
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
1361
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
1362
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
1363
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
1364
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
1365
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1366
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
1367
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1368
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
1369
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
1370
};
1371
#endif
1372

    
1373
void helper_compute_psr(void)
1374
{
1375
    uint32_t new_psr;
1376

    
1377
    new_psr = icc_table[CC_OP].compute_all();
1378
    env->psr = new_psr;
1379
#ifdef TARGET_SPARC64
1380
    new_psr = xcc_table[CC_OP].compute_all();
1381
    env->xcc = new_psr;
1382
#endif
1383
    CC_OP = CC_OP_FLAGS;
1384
}
1385

    
1386
uint32_t helper_compute_C_icc(void)
1387
{
1388
    uint32_t ret;
1389

    
1390
    ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
1391
    return ret;
1392
}
1393

    
1394
static inline void memcpy32(target_ulong *dst, const target_ulong *src)
1395
{
1396
    dst[0] = src[0];
1397
    dst[1] = src[1];
1398
    dst[2] = src[2];
1399
    dst[3] = src[3];
1400
    dst[4] = src[4];
1401
    dst[5] = src[5];
1402
    dst[6] = src[6];
1403
    dst[7] = src[7];
1404
}
1405

    
1406
static void set_cwp(int new_cwp)
1407
{
1408
    /* put the modified wrap registers at their proper location */
1409
    if (env->cwp == env->nwindows - 1) {
1410
        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
1411
    }
1412
    env->cwp = new_cwp;
1413

    
1414
    /* put the wrap registers at their temporary location */
1415
    if (new_cwp == env->nwindows - 1) {
1416
        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
1417
    }
1418
    env->regwptr = env->regbase + (new_cwp * 16);
1419
}
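/* The register file is circular, so the first and the last window share
   eight registers; a spare copy of that shared block lives after the last
   window, and the two memcpy32() calls above keep it coherent when the wrap
   window is left or entered. */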
1420

    
1421
void cpu_set_cwp(CPUState *env1, int new_cwp)
1422
{
1423
    CPUState *saved_env;
1424

    
1425
    saved_env = env;
1426
    env = env1;
1427
    set_cwp(new_cwp);
1428
    env = saved_env;
1429
}
1430

    
1431
static target_ulong get_psr(void)
1432
{
1433
    helper_compute_psr();
1434

    
1435
#if !defined (TARGET_SPARC64)
1436
    return env->version | (env->psr & PSR_ICC) |
1437
        (env->psref? PSR_EF : 0) |
1438
        (env->psrpil << 8) |
1439
        (env->psrs? PSR_S : 0) |
1440
        (env->psrps? PSR_PS : 0) |
1441
        (env->psret? PSR_ET : 0) | env->cwp;
1442
#else
1443
    return env->psr & PSR_ICC;
1444
#endif
1445
}
1446

    
1447
target_ulong cpu_get_psr(CPUState *env1)
1448
{
1449
    CPUState *saved_env;
1450
    target_ulong ret;
1451

    
1452
    saved_env = env;
1453
    env = env1;
1454
    ret = get_psr();
1455
    env = saved_env;
1456
    return ret;
1457
}
1458

    
1459
static void put_psr(target_ulong val)
1460
{
1461
    env->psr = val & PSR_ICC;
1462
#if !defined (TARGET_SPARC64)
1463
    env->psref = (val & PSR_EF)? 1 : 0;
1464
    env->psrpil = (val & PSR_PIL) >> 8;
1465
#endif
1466
#if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
1467
    cpu_check_irqs(env);
1468
#endif
1469
#if !defined (TARGET_SPARC64)
1470
    env->psrs = (val & PSR_S)? 1 : 0;
1471
    env->psrps = (val & PSR_PS)? 1 : 0;
1472
    env->psret = (val & PSR_ET)? 1 : 0;
1473
    set_cwp(val & PSR_CWP);
1474
#endif
1475
    env->cc_op = CC_OP_FLAGS;
1476
}
1477

    
1478
void cpu_put_psr(CPUState *env1, target_ulong val)
1479
{
1480
    CPUState *saved_env;
1481

    
1482
    saved_env = env;
1483
    env = env1;
1484
    put_psr(val);
1485
    env = saved_env;
1486
}
1487

    
1488
static int cwp_inc(int cwp)
1489
{
1490
    if (unlikely(cwp >= env->nwindows)) {
1491
        cwp -= env->nwindows;
1492
    }
1493
    return cwp;
1494
}
1495

    
1496
int cpu_cwp_inc(CPUState *env1, int cwp)
1497
{
1498
    CPUState *saved_env;
1499
    target_ulong ret;
1500

    
1501
    saved_env = env;
1502
    env = env1;
1503
    ret = cwp_inc(cwp);
1504
    env = saved_env;
1505
    return ret;
1506
}
1507

    
1508
static int cwp_dec(int cwp)
1509
{
1510
    if (unlikely(cwp < 0)) {
1511
        cwp += env->nwindows;
1512
    }
1513
    return cwp;
1514
}
1515

    
1516
int cpu_cwp_dec(CPUState *env1, int cwp)
1517
{
1518
    CPUState *saved_env;
1519
    target_ulong ret;
1520

    
1521
    saved_env = env;
1522
    env = env1;
1523
    ret = cwp_dec(cwp);
1524
    env = saved_env;
1525
    return ret;
1526
}
1527

    
1528
#ifdef TARGET_SPARC64
1529
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
1530
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
1531
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
1532

    
1533
GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
1534
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
1535
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
1536

    
1537
GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
1538
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
1539
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
1540

    
1541
GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
1542
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
1543
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
1544

    
1545
GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
1546
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
1547
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
1548

    
1549
GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
1550
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
1551
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
1552
#endif
1553
#undef GEN_FCMPS
1554

    
1555
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
1556
    defined(DEBUG_MXCC)
1557
static void dump_mxcc(CPUState *env)
1558
{
1559
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1560
           "\n",
1561
           env->mxccdata[0], env->mxccdata[1],
1562
           env->mxccdata[2], env->mxccdata[3]);
1563
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1564
           "\n"
1565
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1566
           "\n",
1567
           env->mxccregs[0], env->mxccregs[1],
1568
           env->mxccregs[2], env->mxccregs[3],
1569
           env->mxccregs[4], env->mxccregs[5],
1570
           env->mxccregs[6], env->mxccregs[7]);
1571
}
1572
#endif
1573

    
1574
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
1575
    && defined(DEBUG_ASI)
1576
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
1577
                     uint64_t r1)
1578
{
1579
    switch (size)
1580
    {
1581
    case 1:
1582
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
1583
                    addr, asi, r1 & 0xff);
1584
        break;
1585
    case 2:
1586
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
1587
                    addr, asi, r1 & 0xffff);
1588
        break;
1589
    case 4:
1590
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
1591
                    addr, asi, r1 & 0xffffffff);
1592
        break;
1593
    case 8:
1594
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
1595
                    addr, asi, r1);
1596
        break;
1597
    }
1598
}
1599
#endif
1600

    
1601
#ifndef TARGET_SPARC64
1602
#ifndef CONFIG_USER_ONLY
1603
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1604
{
1605
    uint64_t ret = 0;
1606
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
1607
    uint32_t last_addr = addr;
1608
#endif
1609

    
1610
    helper_check_align(addr, size - 1);
1611
    switch (asi) {
1612
    case 2: /* SuperSparc MXCC registers */
1613
        switch (addr) {
1614
        case 0x01c00a00: /* MXCC control register */
1615
            if (size == 8)
1616
                ret = env->mxccregs[3];
1617
            else
1618
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1619
                             size);
1620
            break;
1621
        case 0x01c00a04: /* MXCC control register */
1622
            if (size == 4)
1623
                ret = env->mxccregs[3];
1624
            else
1625
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1626
                             size);
1627
            break;
1628
        case 0x01c00c00: /* Module reset register */
1629
            if (size == 8) {
1630
                ret = env->mxccregs[5];
1631
                // should we do something here?
1632
            } else
1633
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1634
                             size);
1635
            break;
1636
        case 0x01c00f00: /* MBus port address register */
1637
            if (size == 8)
1638
                ret = env->mxccregs[7];
1639
            else
1640
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1641
                             size);
1642
            break;
1643
        default:
1644
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1645
                         size);
1646
            break;
1647
        }
1648
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
1649
                     "addr = %08x -> ret = %" PRIx64 ","
1650
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
1651
#ifdef DEBUG_MXCC
1652
        dump_mxcc(env);
1653
#endif
1654
        break;
1655
    case 3: /* MMU probe */
1656
        {
1657
            int mmulev;
1658

    
1659
            mmulev = (addr >> 8) & 15;
1660
            if (mmulev > 4)
1661
                ret = 0;
1662
            else
1663
                ret = mmu_probe(env, addr, mmulev);
1664
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
1665
                        addr, mmulev, ret);
1666
        }
1667
        break;
1668
    case 4: /* read MMU regs */
1669
        {
1670
            int reg = (addr >> 8) & 0x1f;
1671

    
1672
            ret = env->mmuregs[reg];
1673
            if (reg == 3) /* Fault status cleared on read */
1674
                env->mmuregs[3] = 0;
1675
            else if (reg == 0x13) /* Fault status read */
1676
                ret = env->mmuregs[3];
1677
            else if (reg == 0x14) /* Fault address read */
1678
                ret = env->mmuregs[4];
1679
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
1680
        }
1681
        break;
1682
    case 5: // Turbosparc ITLB Diagnostic
1683
    case 6: // Turbosparc DTLB Diagnostic
1684
    case 7: // Turbosparc IOTLB Diagnostic
1685
        break;
1686
    case 9: /* Supervisor code access */
1687
        switch(size) {
1688
        case 1:
1689
            ret = ldub_code(addr);
1690
            break;
1691
        case 2:
1692
            ret = lduw_code(addr);
1693
            break;
1694
        default:
1695
        case 4:
1696
            ret = ldl_code(addr);
1697
            break;
1698
        case 8:
1699
            ret = ldq_code(addr);
1700
            break;
1701
        }
1702
        break;
1703
    case 0xa: /* User data access */
1704
        switch(size) {
1705
        case 1:
1706
            ret = ldub_user(addr);
1707
            break;
1708
        case 2:
1709
            ret = lduw_user(addr);
1710
            break;
1711
        default:
1712
        case 4:
1713
            ret = ldl_user(addr);
1714
            break;
1715
        case 8:
1716
            ret = ldq_user(addr);
1717
            break;
1718
        }
1719
        break;
1720
    case 0xb: /* Supervisor data access */
1721
        switch(size) {
1722
        case 1:
1723
            ret = ldub_kernel(addr);
1724
            break;
1725
        case 2:
1726
            ret = lduw_kernel(addr);
1727
            break;
1728
        default:
1729
        case 4:
1730
            ret = ldl_kernel(addr);
1731
            break;
1732
        case 8:
1733
            ret = ldq_kernel(addr);
1734
            break;
1735
        }
1736
        break;
1737
    case 0xc: /* I-cache tag */
1738
    case 0xd: /* I-cache data */
1739
    case 0xe: /* D-cache tag */
1740
    case 0xf: /* D-cache data */
1741
        break;
1742
    case 0x20: /* MMU passthrough */
1743
        switch(size) {
1744
        case 1:
1745
            ret = ldub_phys(addr);
1746
            break;
1747
        case 2:
1748
            ret = lduw_phys(addr);
1749
            break;
1750
        default:
1751
        case 4:
1752
            ret = ldl_phys(addr);
1753
            break;
1754
        case 8:
1755
            ret = ldq_phys(addr);
1756
            break;
1757
        }
1758
        break;
1759
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
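        /* For these bypass ASIs the low ASI nibble supplies physical address
           bits 35:32, extending the 32-bit address to the 36-bit physical
           range named in the comment above. */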
1760
        switch(size) {
1761
        case 1:
1762
            ret = ldub_phys((target_phys_addr_t)addr
1763
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1764
            break;
1765
        case 2:
1766
            ret = lduw_phys((target_phys_addr_t)addr
1767
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1768
            break;
1769
        default:
1770
        case 4:
1771
            ret = ldl_phys((target_phys_addr_t)addr
1772
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1773
            break;
1774
        case 8:
1775
            ret = ldq_phys((target_phys_addr_t)addr
1776
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1777
            break;
1778
        }
1779
        break;
1780
    case 0x30: // Turbosparc secondary cache diagnostic
1781
    case 0x31: // Turbosparc RAM snoop
1782
    case 0x32: // Turbosparc page table descriptor diagnostic
1783
    case 0x39: /* data cache diagnostic register */
1784
    case 0x4c: /* SuperSPARC MMU Breakpoint Action register */
1785
        ret = 0;
1786
        break;
1787
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
1788
        {
1789
            int reg = (addr >> 8) & 3;
1790

    
1791
            switch(reg) {
1792
            case 0: /* Breakpoint Value (Addr) */
1793
                ret = env->mmubpregs[reg];
1794
                break;
1795
            case 1: /* Breakpoint Mask */
1796
                ret = env->mmubpregs[reg];
1797
                break;
1798
            case 2: /* Breakpoint Control */
1799
                ret = env->mmubpregs[reg];
1800
                break;
1801
            case 3: /* Breakpoint Status */
1802
                ret = env->mmubpregs[reg];
1803
                env->mmubpregs[reg] = 0ULL;
1804
                break;
1805
            }
1806
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
1807
                        ret);
1808
        }
1809
        break;
1810
    case 8: /* User code access, XXX */
1811
    default:
1812
        do_unassigned_access(addr, 0, 0, asi, size);
1813
        ret = 0;
1814
        break;
1815
    }
1816
    if (sign) {
1817
        switch(size) {
1818
        case 1:
1819
            ret = (int8_t) ret;
1820
            break;
1821
        case 2:
1822
            ret = (int16_t) ret;
1823
            break;
1824
        case 4:
1825
            ret = (int32_t) ret;
1826
            break;
1827
        default:
1828
            break;
1829
        }
1830
    }
1831
#ifdef DEBUG_ASI
1832
    dump_asi("read ", last_addr, asi, size, ret);
1833
#endif
1834
    return ret;
1835
}
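/*
 * Store to an alternate address space (SPARCv8 "sta"/"stda").  As in the
 * load path above, the ASI selects the target: MXCC control registers
 * (asi 2), SRMMU flush/probe and register writes (asi 3 and 4),
 * user/supervisor data space, cache diagnostics, or a physical access
 * that bypasses the MMU (asi 0x20-0x2f).  For example, a supervisor can
 * flush the whole soft TLB with a store to asi 3 at address 0x400, since
 * bits [11:8] of the address encode the flush level (4 = flush entire).
 * Anything unhandled ends up in do_unassigned_access().
 */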

void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
{
    helper_check_align(addr, size - 1);
    switch(asi) {
    case 2: /* SuperSparc MXCC registers */
        switch (addr) {
        case 0x01c00000: /* MXCC stream data register 0 */
            if (size == 8)
                env->mxccdata[0] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00008: /* MXCC stream data register 1 */
            if (size == 8)
                env->mxccdata[1] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00010: /* MXCC stream data register 2 */
            if (size == 8)
                env->mxccdata[2] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00018: /* MXCC stream data register 3 */
            if (size == 8)
                env->mxccdata[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00100: /* MXCC stream source */
            if (size == 8)
                env->mxccregs[0] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        0);
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        8);
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        16);
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        24);
            break;
        case 0x01c00200: /* MXCC stream destination */
            if (size == 8)
                env->mxccregs[1] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  0,
                     env->mxccdata[0]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  8,
                     env->mxccdata[1]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
                     env->mxccdata[2]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
                     env->mxccdata[3]);
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                env->mxccregs[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4)
                env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
                    | val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00e00: /* MXCC error register  */
            // writing a 1 bit clears the error
            if (size == 8)
                env->mxccregs[6] &= ~val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                env->mxccregs[7] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
                     asi, size, addr, val);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case 3: /* MMU flush */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            DPRINTF_MMU("mmu flush level %d\n", mmulev);
            switch (mmulev) {
            case 0: // flush page
                tlb_flush_page(env, addr & 0xfffff000);
                break;
            case 1: // flush segment (256k)
            case 2: // flush region (16M)
            case 3: // flush context (4G)
            case 4: // flush entire
                tlb_flush(env, 1);
                break;
            default:
                break;
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
        }
        break;
    case 4: /* write MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;
            uint32_t oldreg;

            oldreg = env->mmuregs[reg];
            switch(reg) {
            case 0: // Control Register
                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
                                    (val & 0x00ffffff);
                // Mappings generated during no-fault mode or MMU
                // disabled mode are invalid in normal mode
                if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
                    (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
                    tlb_flush(env, 1);
                break;
            case 1: // Context Table Pointer Register
                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
                break;
            case 2: // Context Register
                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
                if (oldreg != env->mmuregs[reg]) {
                    /* we flush when the MMU context changes because
                       QEMU has no MMU context support */
                    tlb_flush(env, 1);
                }
                break;
            case 3: // Synchronous Fault Status Register with Clear
            case 4: // Synchronous Fault Address Register
                break;
            case 0x10: // TLB Replacement Control Register
                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
                break;
            case 0x13: // Synchronous Fault Status Register with Read and Clear
                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
                break;
            case 0x14: // Synchronous Fault Address Register
                env->mmuregs[4] = val;
                break;
            default:
                env->mmuregs[reg] = val;
                break;
            }
            if (oldreg != env->mmuregs[reg]) {
                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
                            reg, oldreg, env->mmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
        }
        break;
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            stb_user(addr, val);
            break;
        case 2:
            stw_user(addr, val);
            break;
        default:
        case 4:
            stl_user(addr, val);
            break;
        case 8:
            stq_user(addr, val);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            stb_kernel(addr, val);
            break;
        case 2:
            stw_kernel(addr, val);
            break;
        default:
        case 4:
            stl_kernel(addr, val);
            break;
        case 8:
            stq_kernel(addr, val);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
    case 0x10: /* I/D-cache flush page */
    case 0x11: /* I/D-cache flush segment */
    case 0x12: /* I/D-cache flush region */
    case 0x13: /* I/D-cache flush context */
    case 0x14: /* I/D-cache flush user */
        break;
    case 0x17: /* Block copy, sta access */
        {
            // val = src
            // addr = dst
            // copy 32 bytes
            unsigned int i;
            uint32_t src = val & ~3, dst = addr & ~3, temp;

            for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
                temp = ldl_kernel(src);
                stl_kernel(dst, temp);
            }
        }
        break;
    case 0x1f: /* Block fill, stda access */
        {
            // addr = dst
            // fill 32 bytes with val
            unsigned int i;
            // align the destination down to an 8-byte boundary, like the
            // block copy case above aligns to 4 bytes
            uint32_t dst = addr & ~7;

            for (i = 0; i < 32; i += 8, dst += 8)
                stq_kernel(dst, val);
        }
        break;
    case 0x20: /* MMU passthrough */
        {
            switch(size) {
            case 1:
                stb_phys(addr, val);
                break;
            case 2:
                stw_phys(addr, val);
                break;
            case 4:
            default:
                stl_phys(addr, val);
                break;
            case 8:
                stq_phys(addr, val);
                break;
            }
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        {
            switch(size) {
            case 1:
                stb_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 2:
                stw_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 4:
            default:
                stl_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 8:
                stq_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            }
        }
        break;
    case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
    case 0x31: // store buffer data, Ross RT620 I-cache flush or
               // Turbosparc snoop RAM
    case 0x32: // store buffer control or Turbosparc page table
               // descriptor diagnostic
    case 0x36: /* I-cache flash clear */
    case 0x37: /* D-cache flash clear */
    case 0x4c: /* breakpoint action */
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        {
            int reg = (addr >> 8) & 3;

            switch(reg) {
            case 0: /* Breakpoint Value (Addr) */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 1: /* Breakpoint Mask */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 2: /* Breakpoint Control */
                env->mmubpregs[reg] = (val & 0x7fULL);
                break;
            case 3: /* Breakpoint Status */
                env->mmubpregs[reg] = (val & 0xfULL);
                break;
            }
            DPRINTF_MMU("write breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        env->mmubpregs[reg]);
        }
        break;
    case 8: /* User code access, XXX */
    case 9: /* Supervisor code access, XXX */
    default:
        do_unassigned_access(addr, 1, 0, asi, size);
        break;
    }
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
}

#endif /* CONFIG_USER_ONLY */
#else /* TARGET_SPARC64 */

#ifdef CONFIG_USER_ONLY
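/*
 * sparc64 user-mode emulation: ASIs below 0x80 are restricted and raise
 * a privileged-action trap.  The no-fault ASIs (0x82/0x83 and their LE
 * variants) silently return 0 when the page is not readable, the LE ASIs
 * byte-swap the value after the access, and only the primary address
 * space is actually implemented; the secondary variants are still stubs.
 */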
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x80: // Primary
    case 0x88: // Primary LE
        {
            switch(size) {
            case 1:
                ret = ldub_raw(addr);
                break;
            case 2:
                ret = lduw_raw(addr);
                break;
            case 4:
                ret = ldl_raw(addr);
                break;
            default:
            case 8:
                ret = ldq_raw(addr);
                break;
            }
        }
        break;
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        break;
    default:
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}

void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    /* Convert to little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
    default:
        break;
    }

    switch(asi) {
    case 0x80: // Primary
    case 0x88: // Primary LE
        {
            switch(size) {
            case 1:
                stb_raw(addr, val);
                break;
            case 2:
                stw_raw(addr, val);
                break;
            case 4:
                stl_raw(addr, val);
                break;
            case 8:
            default:
                stq_raw(addr, val);
                break;
            }
        }
        break;
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        return;

    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}

#else /* CONFIG_USER_ONLY */

uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    asi &= 0xff;

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        {
            /* secondary space access has lowest asi bit equal to 1 */
            int access_mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX
                                           : MMU_KERNEL_IDX;

            if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
#ifdef DEBUG_ASI
                dump_asi("read ", last_addr, asi, size, ret);
#endif
                return 0;
            }
        }
        // Fall through
    case 0x10: // As if user primary
    case 0x11: // As if user secondary
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x80: // Primary
    case 0x81: // Secondary
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if (cpu_hypervisor_mode(env)) {
                switch(size) {
                case 1:
                    ret = ldub_hypv(addr);
                    break;
                case 2:
                    ret = lduw_hypv(addr);
                    break;
                case 4:
                    ret = ldl_hypv(addr);
                    break;
                default:
                case 8:
                    ret = ldq_hypv(addr);
                    break;
                }
            } else {
                /* secondary space access has lowest asi bit equal to 1 */
                if (asi & 1) {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel_secondary(addr);
                        break;
                    case 2:
                        ret = lduw_kernel_secondary(addr);
                        break;
                    case 4:
                        ret = ldl_kernel_secondary(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel_secondary(addr);
                        break;
                    }
                } else {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel(addr);
                        break;
                    case 2:
                        ret = lduw_kernel(addr);
                        break;
                    case 4:
                        ret = ldl_kernel(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel(addr);
                        break;
                    }
                }
            }
        } else {
            /* secondary space access has lowest asi bit equal to 1 */
            if (asi & 1) {
                switch(size) {
                case 1:
                    ret = ldub_user_secondary(addr);
                    break;
                case 2:
                    ret = lduw_user_secondary(addr);
                    break;
                case 4:
                    ret = ldl_user_secondary(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user_secondary(addr);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    ret = ldub_user(addr);
                    break;
                case 2:
                    ret = lduw_user(addr);
                    break;
                case 4:
                    ret = ldl_user(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user(addr);
                    break;
                }
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        {
            switch(size) {
            case 1:
                ret = ldub_phys(addr);
                break;
            case 2:
                ret = lduw_phys(addr);
                break;
            case 4:
                ret = ldl_phys(addr);
                break;
            default:
            case 8:
                ret = ldq_phys(addr);
                break;
            }
            break;
        }
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        //  Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return 0;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    {
        switch(size) {
        case 1:
            ret = ldub_nucleus(addr);
            break;
        case 2:
            ret = lduw_nucleus(addr);
            break;
        case 4:
            ret = ldl_nucleus(addr);
            break;
        default:
        case 8:
            ret = ldq_nucleus(addr);
            break;
        }
        break;
    }
    case 0x4a: // UPA config
        // XXX
        break;
    case 0x45: // LSU
        ret = env->lsu;
        break;
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // I-TSB Tag Target register
                ret = ultrasparc_tag_target(env->immu.tag_access);
            } else {
                ret = env->immuregs[reg];
            }

            break;
        }
    case 0x51: // I-MMU 8k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         8*1024);
            break;
        }
    case 0x52: // I-MMU 64k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         64*1024);
            break;
        }
    case 0x55: // I-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tte;
            break;
        }
    case 0x56: // I-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tag;
            break;
        }
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // D-TSB Tag Target register
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
            } else {
                ret = env->dmmuregs[reg];
            }
            break;
        }
    case 0x59: // D-MMU 8k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         8*1024);
            break;
        }
    case 0x5a: // D-MMU 64k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         64*1024);
            break;
        }
    case 0x5d: // D-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tte;
            break;
        }
    case 0x5e: // D-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tag;
            break;
        }
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        break;
    case 0x5b: // D-MMU data pointer
    case 0x48: // Interrupt dispatch, RO
    case 0x49: // Interrupt data receive
    case 0x7f: // Incoming interrupt vector, RO
        // XXX
        break;
    case 0x54: // I-MMU data in, WO
    case 0x57: // I-MMU demap, WO
    case 0x5c: // D-MMU data in, WO
    case 0x5f: // D-MMU demap, WO
    case 0x77: // Interrupt vector, WO
    default:
        do_unassigned_access(addr, 0, 0, 1, size);
        ret = 0;
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
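/*
 * Store to an alternate address space on sparc64 (system emulation).
 * Stores below ASI 0x80 are privileged, and with a hypervisor present
 * the 0x30-0x7f range additionally requires HS_PRIV.  Little-endian
 * ASIs byte-swap the value before it is written.  Besides the normal
 * primary/secondary/nucleus/bypass data paths, this is also how the
 * UltraSPARC I/D-MMUs are programmed: ASIs 0x50-0x5f update MMU
 * registers and TLB entries and may force a global tlb_flush().
 */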

void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif

    asi &= 0xff;

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    /* Convert to little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
    default:
        break;
    }

    switch(asi) {
    case 0x10: // As if user primary
    case 0x11: // As if user secondary
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x80: // Primary
    case 0x81: // Secondary
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if (cpu_hypervisor_mode(env)) {
                switch(size) {
                case 1:
                    stb_hypv(addr, val);
                    break;
                case 2:
                    stw_hypv(addr, val);
                    break;
                case 4:
                    stl_hypv(addr, val);
                    break;
                case 8:
                default:
                    stq_hypv(addr, val);
                    break;
                }
            } else {
                /* secondary space access has lowest asi bit equal to 1 */
                if (asi & 1) {
                    switch(size) {
                    case 1:
                        stb_kernel_secondary(addr, val);
                        break;
                    case 2:
                        stw_kernel_secondary(addr, val);
                        break;
                    case 4:
                        stl_kernel_secondary(addr, val);
                        break;
                    case 8:
                    default:
                        stq_kernel_secondary(addr, val);
                        break;
                    }
                } else {
                    switch(size) {
                    case 1:
                        stb_kernel(addr, val);
                        break;
                    case 2:
                        stw_kernel(addr, val);
                        break;
                    case 4:
                        stl_kernel(addr, val);
                        break;
                    case 8:
                    default:
                        stq_kernel(addr, val);
                        break;
                    }
                }
            }
        } else {
            /* secondary space access has lowest asi bit equal to 1 */
            if (asi & 1) {
                switch(size) {
                case 1:
                    stb_user_secondary(addr, val);
                    break;
                case 2:
                    stw_user_secondary(addr, val);
                    break;
                case 4:
                    stl_user_secondary(addr, val);
                    break;
                case 8:
                default:
                    stq_user_secondary(addr, val);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    stb_user(addr, val);
                    break;
                case 2:
                    stw_user(addr, val);
                    break;
                case 4:
                    stl_user(addr, val);
                    break;
                case 8:
                default:
                    stq_user(addr, val);
                    break;
                }
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        {
            switch(size) {
            case 1:
                stb_phys(addr, val);
                break;
            case 2:
                stw_phys(addr, val);
                break;
            case 4:
                stl_phys(addr, val);
                break;
            case 8:
            default:
                stq_phys(addr, val);
                break;
            }
        }
        return;
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        //  Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    {
        switch(size) {
        case 1:
            stb_nucleus(addr, val);
            break;
        case 2:
            stw_nucleus(addr, val);
            break;
        case 4:
            stl_nucleus(addr, val);
            break;
        default:
        case 8:
            stq_nucleus(addr, val);
            break;
        }
        break;
    }

    case 0x4a: // UPA config
        // XXX
        return;
    case 0x45: // LSU
        {
            uint64_t oldreg;

            oldreg = env->lsu;
            env->lsu = val & (DMMU_E | IMMU_E);
            // Mappings generated during D/I MMU disabled mode are
            // invalid in normal mode
            if (oldreg != env->lsu) {
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
                            oldreg, env->lsu);
#ifdef DEBUG_MMU
                dump_mmu(env);
#endif
                tlb_flush(env, 1);
            }
            return;
        }
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->immuregs[reg];
            switch(reg) {
            case 0: // RO
                return;
            case 1: // Not in I-MMU
            case 2:
                return;
            case 3: // SFSR
                if ((val & 1) == 0)
                    val = 0; // Clear SFSR
                env->immu.sfsr = val;
                break;
            case 4: // RO
                return;
            case 5: // TSB access
                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->immu.tsb, val);
                env->immu.tsb = val;
                break;
            case 6: // Tag access
                env->immu.tag_access = val;
                break;
            case 7:
            case 8:
                return;
            default:
                break;
            }

            if (oldreg != env->immuregs[reg]) {
                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
            return;
        }
    case 0x54: // I-MMU data in
        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
        return;
    case 0x55: // I-MMU data access
        {
            // TODO: auto demap

            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
            dump_mmu(env);
#endif
            return;
        }
    case 0x57: // I-MMU demap
        demap_tlb(env->itlb, addr, "immu", env);
        return;
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->dmmuregs[reg];
            switch(reg) {
            case 0: // RO
            case 4:
                return;
            case 3: // SFSR
                if ((val & 1) == 0) {
                    val = 0; // Clear SFSR, Fault address
                    env->dmmu.sfar = 0;
                }
                env->dmmu.sfsr = val;
                break;
            case 1: // Primary context
                env->dmmu.mmu_primary_context = val;
                /* can be optimized to only flush MMU_USER_IDX
                   and MMU_KERNEL_IDX entries */
                tlb_flush(env, 1);
                break;
            case 2: // Secondary context
                env->dmmu.mmu_secondary_context = val;
                /* can be optimized to only flush MMU_USER_SECONDARY_IDX
                   and MMU_KERNEL_SECONDARY_IDX entries */
                tlb_flush(env, 1);
                break;
            case 5: // TSB access
                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->dmmu.tsb, val);
                env->dmmu.tsb = val;
                break;
            case 6: // Tag access
                env->dmmu.tag_access = val;
                break;
            case 7: // Virtual Watchpoint
            case 8: // Physical Watchpoint
            default:
                env->dmmuregs[reg] = val;
                break;
            }

            if (oldreg != env->dmmuregs[reg]) {
                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
            return;
        }
    case 0x5c: // D-MMU data in
        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
        return;
    case 0x5d: // D-MMU data access
        {
            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
            dump_mmu(env);
#endif
            return;
        }
    case 0x5f: // D-MMU demap
        demap_tlb(env->dtlb, addr, "dmmu", env);
        return;
    case 0x49: // Interrupt data receive
        // XXX
        return;
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        return;
    case 0x51: // I-MMU 8k TSB pointer, RO
    case 0x52: // I-MMU 64k TSB pointer, RO
    case 0x56: // I-MMU tag read, RO
    case 0x59: // D-MMU 8k TSB pointer, RO
    case 0x5a: // D-MMU 64k TSB pointer, RO
    case 0x5b: // D-MMU data pointer, RO
    case 0x5e: // D-MMU tag read, RO
    case 0x48: // Interrupt dispatch, RO
    case 0x7f: // Incoming interrupt vector, RO
    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}
#endif /* CONFIG_USER_ONLY */
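/*
 * ldda with an ASI: the nucleus quad ASIs (0x24/0x2c) perform a single
 * 16-byte aligned 128-bit load into a register pair, byte-swapping both
 * halves for the LE variant; every other ASI is split into two 32-bit
 * helper_ld_asi() accesses.  With %g0 as destination only the odd
 * register of the pair is updated.
 */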

void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
#if !defined(CONFIG_USER_ONLY)
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        helper_check_align(addr, 0xf);
        if (rd == 0) {
            env->gregs[1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_nucleus(addr);
            env->gregs[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            env->regwptr[rd] = ldq_nucleus(addr);
            env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
#endif
    default:
        helper_check_align(addr, 0x3);
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}
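/*
 * Block-transfer and plain FP loads via an ASI.  The block load ASIs
 * (0xf0/0xf1/0xf8/0xf9 and the 0x70/0x71 user-privilege forms) move
 * 64 bytes into 16 consecutive single-precision registers, so rd must
 * be a multiple of 8 and addr 64-byte aligned; any other ASI loads a
 * single 32- or 64-bit value into %f[rd] or DT0.
 */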

void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xf0: // Block load primary
    case 0xf1: // Block load secondary
    case 0xf8: // Block load primary LE
    case 0xf9: // Block load secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    case 0x70: // Block load primary, user privilege
    case 0x71: // Block load secondary, user privilege
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x1f, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    val = helper_ld_asi(addr, asi, size, 0);
    switch(size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = val;
        break;
    case 8:
        *((int64_t *)&DT0) = val;
        break;
    case 16:
        // XXX
        break;
    }
}

void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xe0: // UA2007 Block commit store primary (cache flush)
    case 0xe1: // UA2007 Block commit store secondary (cache flush)
    case 0xf0: // Block store primary
    case 0xf1: // Block store secondary
    case 0xf8: // Block store primary LE
    case 0xf9: // Block store secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    case 0x70: // Block store primary, user privilege
    case 0x71: // Block store secondary, user privilege
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x1f, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        val = *((uint32_t *)&env->fpr[rd]);
        break;
    case 8:
        val = *((int64_t *)&DT0);
        break;
    case 16:
        // XXX
        break;
    }
    helper_st_asi(addr, val, asi, size);
}

target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
                            target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    val2 &= 0xffffffffUL;
    ret = helper_ld_asi(addr, asi, 4, 0);
    ret &= 0xffffffffUL;
    if (val2 == ret)
        helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
    return ret;
}

target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
                             target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    ret = helper_ld_asi(addr, asi, 8, 0);
    if (val2 == ret)
        helper_st_asi(addr, val1, asi, 8);
    return ret;
}
#endif /* TARGET_SPARC64 */

#ifndef TARGET_SPARC64
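/*
 * SPARCv8 rett: re-enable traps, rotate the window pointer back by one
 * (checking WIM for a window underflow) and restore the previous
 * supervisor bit from PS.  Executing rett with traps already enabled is
 * treated as an illegal instruction.
 */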
3286
void helper_rett(void)
3287
{
3288
    unsigned int cwp;
3289

    
3290
    if (env->psret == 1)
3291
        raise_exception(TT_ILL_INSN);
3292

    
3293
    env->psret = 1;
3294
    cwp = cwp_inc(env->cwp + 1) ;
3295
    if (env->wim & (1 << cwp)) {
3296
        raise_exception(TT_WIN_UNF);
3297
    }
3298
    set_cwp(cwp);
3299
    env->psrs = env->psrps;
3300
}
3301
#endif
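/*
 * udiv/sdiv divide the 64-bit concatenation (Y:rs1) by the 32-bit rs2
 * operand.  Example: with Y = 1, rs1 = 0 and rs2 = 2 the dividend is
 * 0x100000000 and the quotient 0x80000000, with no overflow.  A quotient
 * that does not fit in 32 bits saturates (to 0xffffffff for udiv, to
 * 0x7fffffff/0x80000000 for sdiv) and the overflow is recorded in
 * cc_src2, which the divcc condition-code computation uses as V.
 */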

target_ulong helper_udiv(target_ulong a, target_ulong b)
{
    uint64_t x0;
    uint32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = b;

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if (x0 > 0xffffffff) {
        env->cc_src2 = 1;
        return 0xffffffff;
    } else {
        env->cc_src2 = 0;
        return x0;
    }
}

target_ulong helper_sdiv(target_ulong a, target_ulong b)
{
    int64_t x0;
    int32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = b;

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if ((int32_t) x0 != x0) {
        env->cc_src2 = 1;
        return x0 < 0 ? 0x80000000 : 0x7fffffff;
    } else {
        env->cc_src2 = 0;
        return x0;
    }
}
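/*
 * The FP double/quad memory helpers dispatch on mem_idx, i.e. the
 * translation regime of the access: user, kernel or (on sparc64)
 * hypervisor.  In user-only builds the address is masked with
 * address_mask() and accessed directly.  All of them require 8-byte
 * alignment; true 128-bit accesses are still marked XXX and are carried
 * out as two 64-bit halves.
 */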

void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        stfq_user(addr, DT0);
        break;
    case MMU_KERNEL_IDX:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    stfq_raw(address_mask(env, addr), DT0);
#endif
}

void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        DT0 = ldfq_user(addr);
        break;
    case MMU_KERNEL_IDX:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    DT0 = ldfq_raw(address_mask(env, addr));
#endif
}

void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case MMU_KERNEL_IDX:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.ll.upper = ldq_raw(address_mask(env, addr));
    u.ll.lower = ldq_raw(address_mask(env, addr + 8));
    QT0 = u.q;
#endif
}

void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case MMU_KERNEL_IDX:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.q = QT0;
    stq_raw(address_mask(env, addr), u.ll.upper);
    stq_raw(address_mask(env, addr + 8), u.ll.lower);
#endif
}
3468

    
3469
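/* Propagate the FSR rounding-direction field (FSR.RD) to the softfloat
   rounding mode used for subsequent FP operations. */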
static inline void set_fsr(void)
3470
{
3471
    int rnd_mode;
3472

    
3473
    switch (env->fsr & FSR_RD_MASK) {
3474
    case FSR_RD_NEAREST:
3475
        rnd_mode = float_round_nearest_even;
3476
        break;
3477
    default:
3478
    case FSR_RD_ZERO:
3479
        rnd_mode = float_round_to_zero;
3480
        break;
3481
    case FSR_RD_POS:
3482
        rnd_mode = float_round_up;
3483
        break;
3484
    case FSR_RD_NEG:
3485
        rnd_mode = float_round_down;
3486
        break;
3487
    }
3488
    set_float_rounding_mode(rnd_mode, &env->fp_status);
3489
}
3490

    
3491
void helper_ldfsr(uint32_t new_fsr)
3492
{
3493
    env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
3494
    set_fsr();
3495
}
3496

    
3497
#ifdef TARGET_SPARC64
3498
void helper_ldxfsr(uint64_t new_fsr)
3499
{
3500
    env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
3501
    set_fsr();
3502
}
3503
#endif
3504

    
3505
void helper_debug(void)
3506
{
3507
    env->exception_index = EXCP_DEBUG;
3508
    cpu_loop_exit();
3509
}
3510

    
3511
#ifndef TARGET_SPARC64
3512
/* XXX: use another pointer for %iN registers to avoid slow wrapping
3513
   handling ? */
3514
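/* V8 SAVE: step to the previous register window and raise a
   window_overflow trap if that window is still marked invalid in WIM. */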
void helper_save(void)
3515
{
3516
    uint32_t cwp;
3517

    
3518
    cwp = cwp_dec(env->cwp - 1);
3519
    if (env->wim & (1 << cwp)) {
3520
        raise_exception(TT_WIN_OVF);
3521
    }
3522
    set_cwp(cwp);
3523
}
3524

    
3525
void helper_restore(void)
3526
{
3527
    uint32_t cwp;
3528

    
3529
    cwp = cwp_inc(env->cwp + 1);
3530
    if (env->wim & (1 << cwp)) {
3531
        raise_exception(TT_WIN_UNF);
3532
    }
3533
    set_cwp(cwp);
3534
}
3535

    
3536
void helper_wrpsr(target_ulong new_psr)
3537
{
3538
    if ((new_psr & PSR_CWP) >= env->nwindows) {
3539
        raise_exception(TT_ILL_INSN);
3540
    } else {
3541
        cpu_put_psr(env, new_psr);
3542
    }
3543
}
3544

    
3545
target_ulong helper_rdpsr(void)
3546
{
3547
    return get_psr();
3548
}
3549

    
3550
#else
3551
/* XXX: use another pointer for %iN registers to avoid slow wrapping
3552
   handling ? */
3553
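/* V9 SAVE: with no savable windows (CANSAVE == 0) raise a spill trap,
   selecting the normal or OTHER spill vector from WSTATE; with no clean
   window available (CLEANWIN - CANRESTORE == 0) raise clean_window;
   otherwise update CANSAVE/CANRESTORE and switch windows.
   helper_restore below is the symmetric fill case. */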
void helper_save(void)
3554
{
3555
    uint32_t cwp;
3556

    
3557
    cwp = cwp_dec(env->cwp - 1);
3558
    if (env->cansave == 0) {
3559
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
3560
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
3561
                                    ((env->wstate & 0x7) << 2)));
3562
    } else {
3563
        if (env->cleanwin - env->canrestore == 0) {
3564
            // XXX Clean windows without trap
3565
            raise_exception(TT_CLRWIN);
3566
        } else {
3567
            env->cansave--;
3568
            env->canrestore++;
3569
            set_cwp(cwp);
3570
        }
3571
    }
3572
}
3573

    
3574
void helper_restore(void)
3575
{
3576
    uint32_t cwp;
3577

    
3578
    cwp = cwp_inc(env->cwp + 1);
3579
    if (env->canrestore == 0) {
3580
        raise_exception(TT_FILL | (env->otherwin != 0 ?
3581
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
3582
                                   ((env->wstate & 0x7) << 2)));
3583
    } else {
3584
        env->cansave++;
3585
        env->canrestore--;
3586
        set_cwp(cwp);
3587
    }
3588
}
3589

    
3590
void helper_flushw(void)
3591
{
3592
    if (env->cansave != env->nwindows - 2) {
3593
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
3594
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
3595
                                    ((env->wstate & 0x7) << 2)));
3596
    }
3597
}
3598

    
3599
void helper_saved(void)
3600
{
3601
    env->cansave++;
3602
    if (env->otherwin == 0)
3603
        env->canrestore--;
3604
    else
3605
        env->otherwin--;
3606
}
3607

    
3608
void helper_restored(void)
3609
{
3610
    env->canrestore++;
3611
    if (env->cleanwin < env->nwindows - 1)
3612
        env->cleanwin++;
3613
    if (env->otherwin == 0)
3614
        env->cansave--;
3615
    else
3616
        env->otherwin--;
3617
}
3618

    
3619
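/* CCR layout: xcc in bits 7:4, icc in bits 3:0.  Internally both flag
   sets are kept at bits 23:20 of env->xcc and env->psr, hence the
   shifts by 20 here and in put_ccr below. */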
static target_ulong get_ccr(void)
3620
{
3621
    target_ulong psr;
3622

    
3623
    psr = get_psr();
3624

    
3625
    return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
3626
}
3627

    
3628
target_ulong cpu_get_ccr(CPUState *env1)
3629
{
3630
    CPUState *saved_env;
3631
    target_ulong ret;
3632

    
3633
    saved_env = env;
3634
    env = env1;
3635
    ret = get_ccr();
3636
    env = saved_env;
3637
    return ret;
3638
}
3639

    
3640
static void put_ccr(target_ulong val)
3641
{
3642
    target_ulong tmp = val;
3643

    
3644
    env->xcc = (tmp >> 4) << 20;
3645
    env->psr = (tmp & 0xf) << 20;
3646
    CC_OP = CC_OP_FLAGS;
3647
}
3648

    
3649
void cpu_put_ccr(CPUState *env1, target_ulong val)
3650
{
3651
    CPUState *saved_env;
3652

    
3653
    saved_env = env;
3654
    env = env1;
3655
    put_ccr(val);
3656
    env = saved_env;
3657
}
3658

    
3659
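/* V9 numbers windows in the opposite direction from V8; the internal
   state keeps V8 ordering, so the V9 CWP value is nwindows - 1 - cwp. */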
static target_ulong get_cwp64(void)
3660
{
3661
    return env->nwindows - 1 - env->cwp;
3662
}
3663

    
3664
target_ulong cpu_get_cwp64(CPUState *env1)
3665
{
3666
    CPUState *saved_env;
3667
    target_ulong ret;
3668

    
3669
    saved_env = env;
3670
    env = env1;
3671
    ret = get_cwp64();
3672
    env = saved_env;
3673
    return ret;
3674
}
3675

    
3676
static void put_cwp64(int cwp)
3677
{
3678
    if (unlikely(cwp >= env->nwindows || cwp < 0)) {
3679
        cwp %= env->nwindows;
3680
    }
3681
    set_cwp(env->nwindows - 1 - cwp);
3682
}
3683

    
3684
void cpu_put_cwp64(CPUState *env1, int cwp)
3685
{
3686
    CPUState *saved_env;
3687

    
3688
    saved_env = env;
3689
    env = env1;
3690
    put_cwp64(cwp);
3691
    env = saved_env;
3692
}
3693

    
3694
target_ulong helper_rdccr(void)
3695
{
3696
    return get_ccr();
3697
}
3698

    
3699
void helper_wrccr(target_ulong new_ccr)
3700
{
3701
    put_ccr(new_ccr);
3702
}
3703

    
3704
// CWP handling is reversed in V9, but we still use the V8 register
3705
// order.
3706
target_ulong helper_rdcwp(void)
3707
{
3708
    return get_cwp64();
3709
}
3710

    
3711
void helper_wrcwp(target_ulong new_cwp)
3712
{
3713
    put_cwp64(new_cwp);
3714
}
3715

    
3716
// This macro uses non-native bit order
3717
#define GET_FIELD(X, FROM, TO)                                  \
3718
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
3719

    
3720
// This macro uses the order in the manuals, i.e. bit 0 is 2^0
3721
#define GET_FIELD_SP(X, FROM, TO)               \
3722
    GET_FIELD(X, 63 - (TO), 63 - (FROM))
3723

    
3724
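/* For example, GET_FIELD_SP(x, 0, 3) extracts bits 3..0 of x in the
   manuals' numbering.  helper_array8 uses these fields to implement the
   VIS ARRAY8 address computation, reordering the coordinate fields of
   the pixel address (rs1) into a blocked memory layout whose geometry
   depends on cubesize (rs2). */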
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
3725
{
3726
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
3727
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
3728
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
3729
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
3730
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
3731
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
3732
        (((pixel_addr >> 55) & 1) << 4) |
3733
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
3734
        GET_FIELD_SP(pixel_addr, 11, 12);
3735
}
3736

    
3737
target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
3738
{
3739
    uint64_t tmp;
3740

    
3741
    tmp = addr + offset;
3742
    env->gsr &= ~7ULL;
3743
    env->gsr |= tmp & 7ULL;
3744
    return tmp & ~7ULL;
3745
}
3746

    
3747
target_ulong helper_popc(target_ulong val)
3748
{
3749
    return ctpop64(val);
3750
}
3751

    
3752
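/* Return the storage bank for the global registers selected by the
   AG/MG/IG pstate bits: normal (bgregs), alternate (agregs), MMU
   (mgregs) or interrupt (igregs) globals. */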
static inline uint64_t *get_gregset(uint32_t pstate)
3753
{
3754
    switch (pstate) {
3755
    default:
3756
        DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
3757
                pstate,
3758
                (pstate & PS_IG) ? " IG" : "",
3759
                (pstate & PS_MG) ? " MG" : "",
3760
                (pstate & PS_AG) ? " AG" : "");
3761
        /* fall through to the normal set of global registers */
3762
    case 0:
3763
        return env->bgregs;
3764
    case PS_AG:
3765
        return env->agregs;
3766
    case PS_MG:
3767
        return env->mgregs;
3768
    case PS_IG:
3769
        return env->igregs;
3770
    }
3771
}
3772

    
3773
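/* Install a new PSTATE value.  If the global-register selection bits
   (AG/IG/MG) change, the current globals are saved into their old bank
   and the newly selected bank is copied into env->gregs. */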
static inline void change_pstate(uint32_t new_pstate)
3774
{
3775
    uint32_t pstate_regs, new_pstate_regs;
3776
    uint64_t *src, *dst;
3777

    
3778
    if (env->def->features & CPU_FEATURE_GL) {
3779
        // PS_AG is not implemented in this case
3780
        new_pstate &= ~PS_AG;
3781
    }
3782

    
3783
    pstate_regs = env->pstate & 0xc01;
3784
    new_pstate_regs = new_pstate & 0xc01;
3785

    
3786
    if (new_pstate_regs != pstate_regs) {
3787
        DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
3788
                       pstate_regs, new_pstate_regs);
3789
        // Switch global register bank
3790
        src = get_gregset(new_pstate_regs);
3791
        dst = get_gregset(pstate_regs);
3792
        memcpy32(dst, env->gregs);
3793
        memcpy32(env->gregs, src);
3794
    }
3795
    else {
3796
        DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
3797
                       new_pstate_regs);
3798
    }
3799
    env->pstate = new_pstate;
3800
}
3801

    
3802
void helper_wrpstate(target_ulong new_state)
3803
{
3804
    change_pstate(new_state & 0xf3f);
3805

    
3806
#if !defined(CONFIG_USER_ONLY)
3807
    if (cpu_interrupts_enabled(env)) {
3808
        cpu_check_irqs(env);
3809
    }
3810
#endif
3811
}
3812

    
3813
void helper_wrpil(target_ulong new_pil)
3814
{
3815
#if !defined(CONFIG_USER_ONLY)
3816
    DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
3817
                   env->psrpil, (uint32_t)new_pil);
3818

    
3819
    env->psrpil = new_pil;
3820

    
3821
    if (cpu_interrupts_enabled(env)) {
3822
        cpu_check_irqs(env);
3823
    }
3824
#endif
3825
}
3826

    
3827
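/* DONE: pop the trap state at the current TL.  CCR, ASI, PSTATE and CWP
   are restored from TSTATE (packed as CCR<<32 | ASI<<24 | PSTATE<<8 | CWP,
   see do_interrupt below), execution resumes at TNPC and TL is
   decremented.  helper_retry below resumes at TPC/TNPC instead. */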
void helper_done(void)
3828
{
3829
    trap_state* tsptr = cpu_tsptr(env);
3830

    
3831
    env->pc = tsptr->tnpc;
3832
    env->npc = tsptr->tnpc + 4;
3833
    put_ccr(tsptr->tstate >> 32);
3834
    env->asi = (tsptr->tstate >> 24) & 0xff;
3835
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
3836
    put_cwp64(tsptr->tstate & 0xff);
3837
    env->tl--;
3838

    
3839
    DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);
3840

    
3841
#if !defined(CONFIG_USER_ONLY)
3842
    if (cpu_interrupts_enabled(env)) {
3843
        cpu_check_irqs(env);
3844
    }
3845
#endif
3846
}
3847

    
3848
void helper_retry(void)
3849
{
3850
    trap_state* tsptr = cpu_tsptr(env);
3851

    
3852
    env->pc = tsptr->tpc;
3853
    env->npc = tsptr->tnpc;
3854
    put_ccr(tsptr->tstate >> 32);
3855
    env->asi = (tsptr->tstate >> 24) & 0xff;
3856
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
3857
    put_cwp64(tsptr->tstate & 0xff);
3858
    env->tl--;
3859

    
3860
    DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);
3861

    
3862
#if !defined(CONFIG_USER_ONLY)
3863
    if (cpu_interrupts_enabled(env)) {
3864
        cpu_check_irqs(env);
3865
    }
3866
#endif
3867
}
3868

    
3869
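/* Update SOFTINT and, when the value actually changes, re-evaluate
   pending interrupts. */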
static void do_modify_softint(const char* operation, uint32_t value)
3870
{
3871
    if (env->softint != value) {
3872
        env->softint = value;
3873
        DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
3874
#if !defined(CONFIG_USER_ONLY)
3875
        if (cpu_interrupts_enabled(env)) {
3876
            cpu_check_irqs(env);
3877
        }
3878
#endif
3879
    }
3880
}
3881

    
3882
void helper_set_softint(uint64_t value)
3883
{
3884
    do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
3885
}
3886

    
3887
void helper_clear_softint(uint64_t value)
3888
{
3889
    do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
3890
}
3891

    
3892
void helper_write_softint(uint64_t value)
3893
{
3894
    do_modify_softint("helper_write_softint", (uint32_t)value);
3895
}
3896
#endif
3897

    
3898
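/* FLUSH: invalidate any translated code covering the doubleword at addr
   so that self-modifying code is retranslated. */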
void helper_flush(target_ulong addr)
3899
{
3900
    addr &= ~7;
3901
    tb_invalidate_page_range(addr, addr + 8);
3902
}
3903

    
3904
#ifdef TARGET_SPARC64
3905
#ifdef DEBUG_PCALL
3906
static const char * const excp_names[0x80] = {
3907
    [TT_TFAULT] = "Instruction Access Fault",
3908
    [TT_TMISS] = "Instruction Access MMU Miss",
3909
    [TT_CODE_ACCESS] = "Instruction Access Error",
3910
    [TT_ILL_INSN] = "Illegal Instruction",
3911
    [TT_PRIV_INSN] = "Privileged Instruction",
3912
    [TT_NFPU_INSN] = "FPU Disabled",
3913
    [TT_FP_EXCP] = "FPU Exception",
3914
    [TT_TOVF] = "Tag Overflow",
3915
    [TT_CLRWIN] = "Clean Windows",
3916
    [TT_DIV_ZERO] = "Division By Zero",
3917
    [TT_DFAULT] = "Data Access Fault",
3918
    [TT_DMISS] = "Data Access MMU Miss",
3919
    [TT_DATA_ACCESS] = "Data Access Error",
3920
    [TT_DPROT] = "Data Protection Error",
3921
    [TT_UNALIGNED] = "Unaligned Memory Access",
3922
    [TT_PRIV_ACT] = "Privileged Action",
3923
    [TT_EXTINT | 0x1] = "External Interrupt 1",
3924
    [TT_EXTINT | 0x2] = "External Interrupt 2",
3925
    [TT_EXTINT | 0x3] = "External Interrupt 3",
3926
    [TT_EXTINT | 0x4] = "External Interrupt 4",
3927
    [TT_EXTINT | 0x5] = "External Interrupt 5",
3928
    [TT_EXTINT | 0x6] = "External Interrupt 6",
3929
    [TT_EXTINT | 0x7] = "External Interrupt 7",
3930
    [TT_EXTINT | 0x8] = "External Interrupt 8",
3931
    [TT_EXTINT | 0x9] = "External Interrupt 9",
3932
    [TT_EXTINT | 0xa] = "External Interrupt 10",
3933
    [TT_EXTINT | 0xb] = "External Interrupt 11",
3934
    [TT_EXTINT | 0xc] = "External Interrupt 12",
3935
    [TT_EXTINT | 0xd] = "External Interrupt 13",
3936
    [TT_EXTINT | 0xe] = "External Interrupt 14",
3937
    [TT_EXTINT | 0xf] = "External Interrupt 15",
3938
};
3939
#endif
3940

    
3941
trap_state* cpu_tsptr(CPUState* env)
3942
{
3943
    return &env->ts[env->tl & MAXTL_MASK];
3944
}
3945

    
3946
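/* V9 trap entry: save CCR/ASI/PSTATE/CWP, PC and nPC into the trap
   state at the new TL, switch to the global set implied by the trap
   type (IG for interrupt vectors, MG for MMU faults, AG otherwise),
   adjust CWP for clean_window/spill/fill traps, and vector through TBR
   (bit 14 selects the TL>0 trap table). */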
void do_interrupt(CPUState *env)
3947
{
3948
    int intno = env->exception_index;
3949
    trap_state* tsptr;
3950

    
3951
#ifdef DEBUG_PCALL
3952
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
3953
        static int count;
3954
        const char *name;
3955

    
3956
        if (intno < 0 || intno >= 0x180)
3957
            name = "Unknown";
3958
        else if (intno >= 0x100)
3959
            name = "Trap Instruction";
3960
        else if (intno >= 0xc0)
3961
            name = "Window Fill";
3962
        else if (intno >= 0x80)
3963
            name = "Window Spill";
3964
        else {
3965
            name = excp_names[intno];
3966
            if (!name)
3967
                name = "Unknown";
3968
        }
3969

    
3970
        qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
3971
                " SP=%016" PRIx64 "\n",
3972
                count, name, intno,
3973
                env->pc,
3974
                env->npc, env->regwptr[6]);
3975
        log_cpu_state(env, 0);
3976
#if 0
3977
        {
3978
            int i;
3979
            uint8_t *ptr;
3980

3981
            qemu_log("       code=");
3982
            ptr = (uint8_t *)env->pc;
3983
            for(i = 0; i < 16; i++) {
3984
                qemu_log(" %02x", ldub(ptr + i));
3985
            }
3986
            qemu_log("\n");
3987
        }
3988
#endif
3989
        count++;
3990
    }
3991
#endif
3992
#if !defined(CONFIG_USER_ONLY)
3993
    if (env->tl >= env->maxtl) {
3994
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
3995
                  " Error state", env->exception_index, env->tl, env->maxtl);
3996
        return;
3997
    }
3998
#endif
3999
    if (env->tl < env->maxtl - 1) {
4000
        env->tl++;
4001
    } else {
4002
        env->pstate |= PS_RED;
4003
        if (env->tl < env->maxtl)
4004
            env->tl++;
4005
    }
4006
    tsptr = cpu_tsptr(env);
4007

    
4008
    tsptr->tstate = (get_ccr() << 32) |
4009
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
4010
        get_cwp64();
4011
    tsptr->tpc = env->pc;
4012
    tsptr->tnpc = env->npc;
4013
    tsptr->tt = intno;
4014

    
4015
    switch (intno) {
4016
    case TT_IVEC:
4017
        change_pstate(PS_PEF | PS_PRIV | PS_IG);
4018
        break;
4019
    case TT_TFAULT:
4020
    case TT_DFAULT:
4021
    case TT_TMISS ... TT_TMISS + 3:
4022
    case TT_DMISS ... TT_DMISS + 3:
4023
    case TT_DPROT ... TT_DPROT + 3:
4024
        change_pstate(PS_PEF | PS_PRIV | PS_MG);
4025
        break;
4026
    default:
4027
        change_pstate(PS_PEF | PS_PRIV | PS_AG);
4028
        break;
4029
    }
4030

    
4031
    if (intno == TT_CLRWIN) {
4032
        set_cwp(cwp_dec(env->cwp - 1));
4033
    } else if ((intno & 0x1c0) == TT_SPILL) {
4034
        set_cwp(cwp_dec(env->cwp - env->cansave - 2));
4035
    } else if ((intno & 0x1c0) == TT_FILL) {
4036
        set_cwp(cwp_inc(env->cwp + 1));
4037
    }
4038
    env->tbr &= ~0x7fffULL;
4039
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
4040
    env->pc = env->tbr;
4041
    env->npc = env->pc + 4;
4042
    env->exception_index = -1;
4043
}
4044
#else
4045
#ifdef DEBUG_PCALL
4046
static const char * const excp_names[0x80] = {
4047
    [TT_TFAULT] = "Instruction Access Fault",
4048
    [TT_ILL_INSN] = "Illegal Instruction",
4049
    [TT_PRIV_INSN] = "Privileged Instruction",
4050
    [TT_NFPU_INSN] = "FPU Disabled",
4051
    [TT_WIN_OVF] = "Window Overflow",
4052
    [TT_WIN_UNF] = "Window Underflow",
4053
    [TT_UNALIGNED] = "Unaligned Memory Access",
4054
    [TT_FP_EXCP] = "FPU Exception",
4055
    [TT_DFAULT] = "Data Access Fault",
4056
    [TT_TOVF] = "Tag Overflow",
4057
    [TT_EXTINT | 0x1] = "External Interrupt 1",
4058
    [TT_EXTINT | 0x2] = "External Interrupt 2",
4059
    [TT_EXTINT | 0x3] = "External Interrupt 3",
4060
    [TT_EXTINT | 0x4] = "External Interrupt 4",
4061
    [TT_EXTINT | 0x5] = "External Interrupt 5",
4062
    [TT_EXTINT | 0x6] = "External Interrupt 6",
4063
    [TT_EXTINT | 0x7] = "External Interrupt 7",
4064
    [TT_EXTINT | 0x8] = "External Interrupt 8",
4065
    [TT_EXTINT | 0x9] = "External Interrupt 9",
4066
    [TT_EXTINT | 0xa] = "External Interrupt 10",
4067
    [TT_EXTINT | 0xb] = "External Interrupt 11",
4068
    [TT_EXTINT | 0xc] = "External Interrupt 12",
4069
    [TT_EXTINT | 0xd] = "External Interrupt 13",
4070
    [TT_EXTINT | 0xe] = "External Interrupt 14",
4071
    [TT_EXTINT | 0xf] = "External Interrupt 15",
4072
    [TT_TOVF] = "Tag Overflow",
4073
    [TT_CODE_ACCESS] = "Instruction Access Error",
4074
    [TT_DATA_ACCESS] = "Data Access Error",
4075
    [TT_DIV_ZERO] = "Division By Zero",
4076
    [TT_NCP_INSN] = "Coprocessor Disabled",
4077
};
4078
#endif
4079

    
4080
void do_interrupt(CPUState *env)
4081
{
4082
    int cwp, intno = env->exception_index;
4083

    
4084
#ifdef DEBUG_PCALL
4085
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
4086
        static int count;
4087
        const char *name;
4088

    
4089
        if (intno < 0 || intno >= 0x100)
4090
            name = "Unknown";
4091
        else if (intno >= 0x80)
4092
            name = "Trap Instruction";
4093
        else {
4094
            name = excp_names[intno];
4095
            if (!name)
4096
                name = "Unknown";
4097
        }
4098

    
4099
        qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
4100
                count, name, intno,
4101
                env->pc,
4102
                env->npc, env->regwptr[6]);
4103
        log_cpu_state(env, 0);
4104
#if 0
4105
        {
4106
            int i;
4107
            uint8_t *ptr;
4108

4109
            qemu_log("       code=");
4110
            ptr = (uint8_t *)env->pc;
4111
            for(i = 0; i < 16; i++) {
4112
                qemu_log(" %02x", ldub(ptr + i));
4113
            }
4114
            qemu_log("\n");
4115
        }
4116
#endif
4117
        count++;
4118
    }
4119
#endif
4120
#if !defined(CONFIG_USER_ONLY)
4121
    if (env->psret == 0) {
4122
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
4123
                  env->exception_index);
4124
        return;
4125
    }
4126
#endif
4127
    env->psret = 0;
4128
    cwp = cwp_dec(env->cwp - 1);
4129
    set_cwp(cwp);
4130
    env->regwptr[9] = env->pc;
4131
    env->regwptr[10] = env->npc;
4132
    env->psrps = env->psrs;
4133
    env->psrs = 1;
4134
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
4135
    env->pc = env->tbr;
4136
    env->npc = env->pc + 4;
4137
    env->exception_index = -1;
4138
}
4139
#endif
4140

    
4141
#if !defined(CONFIG_USER_ONLY)
4142

    
4143
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
4144
                                void *retaddr);
4145

    
4146
#define MMUSUFFIX _mmu
4147
#define ALIGNED_ONLY
4148

    
4149
#define SHIFT 0
4150
#include "softmmu_template.h"
4151

    
4152
#define SHIFT 1
4153
#include "softmmu_template.h"
4154

    
4155
#define SHIFT 2
4156
#include "softmmu_template.h"
4157

    
4158
#define SHIFT 3
4159
#include "softmmu_template.h"
4160

    
4161
/* XXX: make it generic ? */
4162
static void cpu_restore_state2(void *retaddr)
4163
{
4164
    TranslationBlock *tb;
4165
    unsigned long pc;
4166

    
4167
    if (retaddr) {
4168
        /* now we have a real cpu fault */
4169
        pc = (unsigned long)retaddr;
4170
        tb = tb_find_pc(pc);
4171
        if (tb) {
4172
            /* the PC is inside the translated code. It means that we have
4173
               a virtual CPU fault */
4174
            cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
4175
        }
4176
    }
4177
}
4178

    
4179
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
4180
                                void *retaddr)
4181
{
4182
#ifdef DEBUG_UNALIGNED
4183
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
4184
           "\n", addr, env->pc);
4185
#endif
4186
    cpu_restore_state2(retaddr);
4187
    raise_exception(TT_UNALIGNED);
4188
}
4189

    
4190
/* try to fill the TLB and return an exception if error. If retaddr is
4191
   NULL, it means that the function was called in C code (i.e. not
4192
   from generated code or from helper.c) */
4193
/* XXX: fix it to restore all registers */
4194
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4195
{
4196
    int ret;
4197
    CPUState *saved_env;
4198

    
4199
    /* XXX: hack to restore env in all cases, even if not called from
4200
       generated code */
4201
    saved_env = env;
4202
    env = cpu_single_env;
4203

    
4204
    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4205
    if (ret) {
4206
        cpu_restore_state2(retaddr);
4207
        cpu_loop_exit();
4208
    }
4209
    env = saved_env;
4210
}
4211

    
4212
#endif /* !CONFIG_USER_ONLY */
4213

    
4214
#ifndef TARGET_SPARC64
4215
#if !defined(CONFIG_USER_ONLY)
4216
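/* Record an unassigned access in the SPARC reference MMU fault status
   (mmuregs[3]) and fault address (mmuregs[4]) registers, then raise a
   code or data access error unless the MMU is disabled or no-fault
   mode is active. */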
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
4217
                          int is_asi, int size)
4218
{
4219
    CPUState *saved_env;
4220
    int fault_type;
4221

    
4222
    /* XXX: hack to restore env in all cases, even if not called from
4223
       generated code */
4224
    saved_env = env;
4225
    env = cpu_single_env;
4226
#ifdef DEBUG_UNASSIGNED
4227
    if (is_asi)
4228
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
4229
               " asi 0x%02x from " TARGET_FMT_lx "\n",
4230
               is_exec ? "exec" : is_write ? "write" : "read", size,
4231
               size == 1 ? "" : "s", addr, is_asi, env->pc);
4232
    else
4233
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
4234
               " from " TARGET_FMT_lx "\n",
4235
               is_exec ? "exec" : is_write ? "write" : "read", size,
4236
               size == 1 ? "" : "s", addr, env->pc);
4237
#endif
4238
    /* Don't overwrite translation and access faults */
4239
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
4240
    if ((fault_type > 4) || (fault_type == 0)) {
4241
        env->mmuregs[3] = 0; /* Fault status register */
4242
        if (is_asi)
4243
            env->mmuregs[3] |= 1 << 16;
4244
        if (env->psrs)
4245
            env->mmuregs[3] |= 1 << 5;
4246
        if (is_exec)
4247
            env->mmuregs[3] |= 1 << 6;
4248
        if (is_write)
4249
            env->mmuregs[3] |= 1 << 7;
4250
        env->mmuregs[3] |= (5 << 2) | 2;
4251
        /* SuperSPARC will never place instruction fault addresses in the FAR */
4252
        if (!is_exec) {
4253
            env->mmuregs[4] = addr; /* Fault address register */
4254
        }
4255
    }
4256
    /* overflow (same type fault was not read before another fault) */
4257
    if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
4258
        env->mmuregs[3] |= 1;
4259
    }
4260

    
4261
    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
4262
        if (is_exec)
4263
            raise_exception(TT_CODE_ACCESS);
4264
        else
4265
            raise_exception(TT_DATA_ACCESS);
4266
    }
4267

    
4268
    /* flush neverland mappings created during no-fault mode,
4269
       so that subsequent MMU faults report the proper fault types */
4270
    if (env->mmuregs[0] & MMU_NF) {
4271
        tlb_flush(env, 1);
4272
    }
4273

    
4274
    env = saved_env;
4275
}
4276
#endif
4277
#else
4278
#if defined(CONFIG_USER_ONLY)
4279
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
4280
                          int is_asi, int size)
4281
#else
4282
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
4283
                          int is_asi, int size)
4284
#endif
4285
{
4286
    CPUState *saved_env;
4287

    
4288
    /* XXX: hack to restore env in all cases, even if not called from
4289
       generated code */
4290
    saved_env = env;
4291
    env = cpu_single_env;
4292

    
4293
#ifdef DEBUG_UNASSIGNED
4294
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
4295
           "\n", addr, env->pc);
4296
#endif
4297

    
4298
    if (is_exec)
4299
        raise_exception(TT_CODE_ACCESS);
4300
    else
4301
        raise_exception(TT_DATA_ACCESS);
4302

    
4303
    env = saved_env;
4304
}
4305
#endif
4306

    
4307

    
4308
#ifdef TARGET_SPARC64
4309
void helper_tick_set_count(void *opaque, uint64_t count)
4310
{
4311
#if !defined(CONFIG_USER_ONLY)
4312
    cpu_tick_set_count(opaque, count);
4313
#endif
4314
}
4315

    
4316
uint64_t helper_tick_get_count(void *opaque)
4317
{
4318
#if !defined(CONFIG_USER_ONLY)
4319
    return cpu_tick_get_count(opaque);
4320
#else
4321
    return 0;
4322
#endif
4323
}
4324

    
4325
void helper_tick_set_limit(void *opaque, uint64_t limit)
4326
{
4327
#if !defined(CONFIG_USER_ONLY)
4328
    cpu_tick_set_limit(opaque, limit);
4329
#endif
4330
}
4331
#endif