#include "exec.h"
#include "host-utils.h"
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_MMU
//#define DEBUG_MXCC
//#define DEBUG_UNALIGNED
//#define DEBUG_UNASSIGNED
//#define DEBUG_ASI
//#define DEBUG_PCALL

#ifdef DEBUG_MMU
#define DPRINTF_MMU(fmt, ...)                                   \
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MMU(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_MXCC
#define DPRINTF_MXCC(fmt, ...)                                  \
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...)                                   \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
#define AM_CHECK(env1) (1)
#endif
#endif

#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
// Calculates TSB pointer value for fault page size 8k or 64k
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
    int tsb_size  = tsb_register & 0xf;

    // discard lower 13 bits which hold tag access context
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;

    // now reorder bits
    uint64_t tsb_base_mask = ~0x1fffULL;
    uint64_t va = tag_access_va;

    // move va bits to correct position
    if (page_size == 8*1024) {
        va >>= 9;
    } else if (page_size == 64*1024) {
        va >>= 12;
    }

    if (tsb_size) {
        tsb_base_mask <<= tsb_size;
    }

    // calculate tsb_base mask and adjust va if split is in use
    if (tsb_split) {
        if (page_size == 8*1024) {
            va &= ~(1ULL << (13 + tsb_size));
        } else if (page_size == 64*1024) {
            va |= (1ULL << (13 + tsb_size));
        }
        tsb_base_mask <<= 1;
    }

    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}
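
/*
 * Worked example (illustrative values, not from the original code): with
 * tsb_register = 0x1fc0000000 (tsb_size = 0, no split) and an 8k fault
 * page, a tag access VA of 0x2000 gives
 *
 *     tag_access_va = 0x2000,  va >>= 9  ->  0x10
 *     tsb_base_mask = ~0x1fffULL
 *     result        = 0x1fc0000000 | 0x10 = 0x1fc0000010
 *
 * i.e. consecutive 8k pages select consecutive 16-byte TSB entries
 * starting at the TSB base.
 */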

// Calculates tag target register value by reordering bits
// in tag access register
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
}
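
/*
 * Illustration (assumed values): the tag access register holds VA[63:13]
 * with the context in its low 13 bits, so the tag target ends up with the
 * context in bits 60..48 and va >> 22 in the low bits, e.g.
 *
 *     tag_access = 0x0000000040000005   // VA 0x40000000, context 5
 *     tag_target = (0x5 << 48) | (0x40000005 >> 22)
 *                = 0x0005000000000100
 */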

static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUState *env1)
{
    target_ulong mask, size, va, offset;

    // flush page range if translation is valid
    if (TTE_IS_VALID(tlb->tte)) {

        mask = 0xffffffffffffe000ULL;
        mask <<= 3 * ((tlb->tte >> 61) & 3);
        size = ~mask + 1;

        va = tlb->tag & mask;

        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
            tlb_flush_page(env1, va + offset);
        }
    }

    tlb->tag = tlb_tag;
    tlb->tte = tlb_tte;
}

static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char* strmmu, CPUState *env1)
{
    unsigned int i;
    target_ulong mask;

    for (i = 0; i < 64; i++) {
        if (TTE_IS_VALID(tlb[i].tte)) {

            mask = 0xffffffffffffe000ULL;
            mask <<= 3 * ((tlb[i].tte >> 61) & 3);

            if ((demap_addr & mask) == (tlb[i].tag & mask)) {
                replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
                dump_mmu(env1);
#endif
            }
            //return;
        }
    }

}

static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char* strmmu, CPUState *env1)
{
    unsigned int i, replace_used;

    // Try replacing invalid entry
    for (i = 0; i < 64; i++) {
        if (!TTE_IS_VALID(tlb[i].tte)) {
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
            dump_mmu(env1);
#endif
            return;
        }
    }

    // All entries are valid, try replacing unlocked entry

    for (replace_used = 0; replace_used < 2; ++replace_used) {

        // Used entries are not replaced on first pass

        for (i = 0; i < 64; i++) {
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {

                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
                            strmmu, (replace_used?"used":"unused"), i);
                dump_mmu(env1);
#endif
                return;
            }
        }

        // Now reset used bit and search for unused entries again

        for (i = 0; i < 64; i++) {
            TTE_SET_UNUSED(tlb[i].tte);
        }
    }

#ifdef DEBUG_MMU
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
#endif
    // error state?
}

#endif

static inline void address_mask(CPUState *env1, target_ulong *addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(env1))
        *addr &= 0xffffffffULL;
#endif
}

static void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit();
}

void HELPER(raise_exception)(int tt)
{
    raise_exception(tt);
}

static inline void set_cwp(int new_cwp)
{
    cpu_set_cwp(env, new_cwp);
}

void helper_check_align(target_ulong addr, uint32_t align)
{
    if (addr & align) {
#ifdef DEBUG_UNALIGNED
        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
               "\n", addr, env->pc);
#endif
        raise_exception(TT_UNALIGNED);
    }
}
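
/*
 * Usage sketch (assumed call sites, for illustration only): "align" is a
 * mask of the address bits that must be clear, not a byte count:
 *
 *     helper_check_align(addr, 3);   // 4-byte access
 *     helper_check_align(addr, 7);   // 8-byte access
 *
 * Any misaligned address raises TT_UNALIGNED via raise_exception().
 */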

#define F_HELPER(name, p) void helper_f##name##p(void)

#define F_BINOP(name)                                           \
    float32 helper_f ## name ## s (float32 src1, float32 src2)  \
    {                                                           \
        return float32_ ## name (src1, src2, &env->fp_status);  \
    }                                                           \
    F_HELPER(name, d)                                           \
    {                                                           \
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);     \
    }                                                           \
    F_HELPER(name, q)                                           \
    {                                                           \
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);    \
    }

F_BINOP(add);
F_BINOP(sub);
F_BINOP(mul);
F_BINOP(div);
#undef F_BINOP

void helper_fsmuld(float32 src1, float32 src2)
{
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
                      float32_to_float64(src2, &env->fp_status),
                      &env->fp_status);
}

void helper_fdmulq(void)
{
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
                       float64_to_float128(DT1, &env->fp_status),
                       &env->fp_status);
}

float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
F_HELPER(neg, d)
{
    DT0 = float64_chs(DT1);
}

F_HELPER(neg, q)
{
    QT0 = float128_chs(QT1);
}
#endif

/* Integer to float conversion.  */
float32 helper_fitos(int32_t src)
{
    return int32_to_float32(src, &env->fp_status);
}

void helper_fitod(int32_t src)
{
    DT0 = int32_to_float64(src, &env->fp_status);
}

void helper_fitoq(int32_t src)
{
    QT0 = int32_to_float128(src, &env->fp_status);
}

#ifdef TARGET_SPARC64
float32 helper_fxtos(void)
{
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, d)
{
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, q)
{
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
#undef F_HELPER

/* floating point conversion */
float32 helper_fdtos(void)
{
    return float64_to_float32(DT1, &env->fp_status);
}

void helper_fstod(float32 src)
{
    DT0 = float32_to_float64(src, &env->fp_status);
}

float32 helper_fqtos(void)
{
    return float128_to_float32(QT1, &env->fp_status);
}

void helper_fstoq(float32 src)
{
    QT0 = float32_to_float128(src, &env->fp_status);
}

void helper_fqtod(void)
{
    DT0 = float128_to_float64(QT1, &env->fp_status);
}

void helper_fdtoq(void)
{
    QT0 = float64_to_float128(DT1, &env->fp_status);
}

/* Float to integer conversion.  */
int32_t helper_fstoi(float32 src)
{
    return float32_to_int32_round_to_zero(src, &env->fp_status);
}

int32_t helper_fdtoi(void)
{
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
}

int32_t helper_fqtoi(void)
{
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
}

#ifdef TARGET_SPARC64
void helper_fstox(float32 src)
{
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
}

void helper_fdtox(void)
{
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}

void helper_fqtox(void)
{
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}

void helper_faligndata(void)
{
    uint64_t tmp;

    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
    /* on many architectures a shift of 64 does nothing */
    if ((env->gsr & 7) != 0) {
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
    }
    *((uint64_t *)&DT0) = tmp;
}
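
/*
 * Illustration (not part of the original code): with (env->gsr & 7) == 3
 * the result is
 *
 *     DT0 << 24 | DT1 >> 40
 *
 * i.e. the 8 bytes starting at byte offset 3 of the 16-byte value DT0:DT1,
 * which is how faligndata assembles an aligned doubleword from two
 * adjacent unaligned loads.
 */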

#ifdef HOST_WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif

typedef union {
    uint8_t b[8];
    uint16_t w[4];
    int16_t sw[4];
    uint32_t l[2];
    float64 d;
} vis64;

typedef union {
    uint8_t b[4];
    uint16_t w[2];
    uint32_t l;
    float32 f;
} vis32;

void helper_fpmerge(void)
{
    vis64 s, d;

    s.d = DT0;
    d.d = DT1;

    // Reverse calculation order to handle overlap
    d.VIS_B64(7) = s.VIS_B64(3);
    d.VIS_B64(6) = d.VIS_B64(3);
    d.VIS_B64(5) = s.VIS_B64(2);
    d.VIS_B64(4) = d.VIS_B64(2);
    d.VIS_B64(3) = s.VIS_B64(1);
    d.VIS_B64(2) = d.VIS_B64(1);
    d.VIS_B64(1) = s.VIS_B64(0);
    //d.VIS_B64(0) = d.VIS_B64(0);

    DT0 = d.d;
}

void helper_fmul8x16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}
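
/*
 * Worked example of the PMUL rounding above (illustrative only): the
 * product keeps 8 fractional bits and is rounded to nearest before the
 * shift, e.g. with d.VIS_SW64(r) = 1 and s.VIS_B64(r) = 0xc0:
 *
 *     tmp = 1 * 0xc0 = 0x0c0      // low byte 0xc0 > 0x7f
 *     tmp += 0x100   = 0x1c0
 *     result         = 0x1c0 >> 8 = 1
 *
 * so 1 * (192/256) rounds up to 1 instead of truncating to 0.
 */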
461

    
462
void helper_fmul8x16al(void)
463
{
464
    vis64 s, d;
465
    uint32_t tmp;
466

    
467
    s.d = DT0;
468
    d.d = DT1;
469

    
470
#define PMUL(r)                                                 \
471
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
472
    if ((tmp & 0xff) > 0x7f)                                    \
473
        tmp += 0x100;                                           \
474
    d.VIS_W64(r) = tmp >> 8;
475

    
476
    PMUL(0);
477
    PMUL(1);
478
    PMUL(2);
479
    PMUL(3);
480
#undef PMUL
481

    
482
    DT0 = d.d;
483
}
484

    
485
void helper_fmul8x16au(void)
486
{
487
    vis64 s, d;
488
    uint32_t tmp;
489

    
490
    s.d = DT0;
491
    d.d = DT1;
492

    
493
#define PMUL(r)                                                 \
494
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
495
    if ((tmp & 0xff) > 0x7f)                                    \
496
        tmp += 0x100;                                           \
497
    d.VIS_W64(r) = tmp >> 8;
498

    
499
    PMUL(0);
500
    PMUL(1);
501
    PMUL(2);
502
    PMUL(3);
503
#undef PMUL
504

    
505
    DT0 = d.d;
506
}
507

    
508
void helper_fmul8sux16(void)
509
{
510
    vis64 s, d;
511
    uint32_t tmp;
512

    
513
    s.d = DT0;
514
    d.d = DT1;
515

    
516
#define PMUL(r)                                                         \
517
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
518
    if ((tmp & 0xff) > 0x7f)                                            \
519
        tmp += 0x100;                                                   \
520
    d.VIS_W64(r) = tmp >> 8;
521

    
522
    PMUL(0);
523
    PMUL(1);
524
    PMUL(2);
525
    PMUL(3);
526
#undef PMUL
527

    
528
    DT0 = d.d;
529
}
530

    
531
void helper_fmul8ulx16(void)
532
{
533
    vis64 s, d;
534
    uint32_t tmp;
535

    
536
    s.d = DT0;
537
    d.d = DT1;
538

    
539
#define PMUL(r)                                                         \
540
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
541
    if ((tmp & 0xff) > 0x7f)                                            \
542
        tmp += 0x100;                                                   \
543
    d.VIS_W64(r) = tmp >> 8;
544

    
545
    PMUL(0);
546
    PMUL(1);
547
    PMUL(2);
548
    PMUL(3);
549
#undef PMUL
550

    
551
    DT0 = d.d;
552
}
553

    
554
void helper_fmuld8sux16(void)
555
{
556
    vis64 s, d;
557
    uint32_t tmp;
558

    
559
    s.d = DT0;
560
    d.d = DT1;
561

    
562
#define PMUL(r)                                                         \
563
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
564
    if ((tmp & 0xff) > 0x7f)                                            \
565
        tmp += 0x100;                                                   \
566
    d.VIS_L64(r) = tmp;
567

    
568
    // Reverse calculation order to handle overlap
569
    PMUL(1);
570
    PMUL(0);
571
#undef PMUL
572

    
573
    DT0 = d.d;
574
}
575

    
576
void helper_fmuld8ulx16(void)
577
{
578
    vis64 s, d;
579
    uint32_t tmp;
580

    
581
    s.d = DT0;
582
    d.d = DT1;
583

    
584
#define PMUL(r)                                                         \
585
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
586
    if ((tmp & 0xff) > 0x7f)                                            \
587
        tmp += 0x100;                                                   \
588
    d.VIS_L64(r) = tmp;
589

    
590
    // Reverse calculation order to handle overlap
591
    PMUL(1);
592
    PMUL(0);
593
#undef PMUL
594

    
595
    DT0 = d.d;
596
}
597

    
598
void helper_fexpand(void)
599
{
600
    vis32 s;
601
    vis64 d;
602

    
603
    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
604
    d.d = DT1;
605
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
606
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
607
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
608
    d.VIS_W64(3) = s.VIS_B32(3) << 4;
609

    
610
    DT0 = d.d;
611
}
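
/*
 * Illustration (not part of the original code): fexpand widens each 8-bit
 * pixel into a 16-bit value shifted left by 4, e.g. the byte 0xff becomes
 * 0x0ff0, so the result can feed the partitioned 8x16 multiplies above
 * without losing precision.
 */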
612

    
613
#define VIS_HELPER(name, F)                             \
614
    void name##16(void)                                 \
615
    {                                                   \
616
        vis64 s, d;                                     \
617
                                                        \
618
        s.d = DT0;                                      \
619
        d.d = DT1;                                      \
620
                                                        \
621
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));   \
622
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));   \
623
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));   \
624
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));   \
625
                                                        \
626
        DT0 = d.d;                                      \
627
    }                                                   \
628
                                                        \
629
    uint32_t name##16s(uint32_t src1, uint32_t src2)    \
630
    {                                                   \
631
        vis32 s, d;                                     \
632
                                                        \
633
        s.l = src1;                                     \
634
        d.l = src2;                                     \
635
                                                        \
636
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));   \
637
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));   \
638
                                                        \
639
        return d.l;                                     \
640
    }                                                   \
641
                                                        \
642
    void name##32(void)                                 \
643
    {                                                   \
644
        vis64 s, d;                                     \
645
                                                        \
646
        s.d = DT0;                                      \
647
        d.d = DT1;                                      \
648
                                                        \
649
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));   \
650
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));   \
651
                                                        \
652
        DT0 = d.d;                                      \
653
    }                                                   \
654
                                                        \
655
    uint32_t name##32s(uint32_t src1, uint32_t src2)    \
656
    {                                                   \
657
        vis32 s, d;                                     \
658
                                                        \
659
        s.l = src1;                                     \
660
        d.l = src2;                                     \
661
                                                        \
662
        d.l = F(d.l, s.l);                              \
663
                                                        \
664
        return d.l;                                     \
665
    }
666

    
667
#define FADD(a, b) ((a) + (b))
668
#define FSUB(a, b) ((a) - (b))
669
VIS_HELPER(helper_fpadd, FADD)
670
VIS_HELPER(helper_fpsub, FSUB)
671

    
672
#define VIS_CMPHELPER(name, F)                                        \
673
    void name##16(void)                                           \
674
    {                                                             \
675
        vis64 s, d;                                               \
676
                                                                  \
677
        s.d = DT0;                                                \
678
        d.d = DT1;                                                \
679
                                                                  \
680
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0;       \
681
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0;      \
682
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0;      \
683
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0;      \
684
                                                                  \
685
        DT0 = d.d;                                                \
686
    }                                                             \
687
                                                                  \
688
    void name##32(void)                                           \
689
    {                                                             \
690
        vis64 s, d;                                               \
691
                                                                  \
692
        s.d = DT0;                                                \
693
        d.d = DT1;                                                \
694
                                                                  \
695
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0;       \
696
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0;      \
697
                                                                  \
698
        DT0 = d.d;                                                \
699
    }
700

    
701
#define FCMPGT(a, b) ((a) > (b))
702
#define FCMPEQ(a, b) ((a) == (b))
703
#define FCMPLE(a, b) ((a) <= (b))
704
#define FCMPNE(a, b) ((a) != (b))
705

    
706
VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
707
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
708
VIS_CMPHELPER(helper_fcmple, FCMPLE)
709
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
710
#endif
711

    
712
void helper_check_ieee_exceptions(void)
713
{
714
    target_ulong status;
715

    
716
    status = get_float_exception_flags(&env->fp_status);
717
    if (status) {
718
        /* Copy IEEE 754 flags into FSR */
719
        if (status & float_flag_invalid)
720
            env->fsr |= FSR_NVC;
721
        if (status & float_flag_overflow)
722
            env->fsr |= FSR_OFC;
723
        if (status & float_flag_underflow)
724
            env->fsr |= FSR_UFC;
725
        if (status & float_flag_divbyzero)
726
            env->fsr |= FSR_DZC;
727
        if (status & float_flag_inexact)
728
            env->fsr |= FSR_NXC;
729

    
730
        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
731
            /* Unmasked exception, generate a trap */
732
            env->fsr |= FSR_FTT_IEEE_EXCP;
733
            raise_exception(TT_FP_EXCP);
734
        } else {
735
            /* Accumulate exceptions */
736
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
737
        }
738
    }
739
}
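
/*
 * Sketch of the FSR bit arithmetic above (based on the SPARC FSR layout):
 * the current-exception field (cexc) sits in bits 4..0 and the trap-enable
 * mask (TEM) in bits 27..23, so
 *
 *     (env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)
 *
 * is non-zero exactly when a newly raised IEEE flag is unmasked, and
 * "cexc << 5" folds the flags into the accrued-exception field (aexc,
 * bits 9..5) when no trap is taken.
 */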
740

    
741
void helper_clear_float_exceptions(void)
742
{
743
    set_float_exception_flags(0, &env->fp_status);
744
}
745

    
746
float32 helper_fabss(float32 src)
747
{
748
    return float32_abs(src);
749
}
750

    
751
#ifdef TARGET_SPARC64
752
void helper_fabsd(void)
753
{
754
    DT0 = float64_abs(DT1);
755
}
756

    
757
void helper_fabsq(void)
758
{
759
    QT0 = float128_abs(QT1);
760
}
761
#endif
762

    
763
float32 helper_fsqrts(float32 src)
764
{
765
    return float32_sqrt(src, &env->fp_status);
766
}
767

    
768
void helper_fsqrtd(void)
769
{
770
    DT0 = float64_sqrt(DT1, &env->fp_status);
771
}
772

    
773
void helper_fsqrtq(void)
774
{
775
    QT0 = float128_sqrt(QT1, &env->fp_status);
776
}
777

    
778
#define GEN_FCMP(name, size, reg1, reg2, FS, TRAP)                      \
779
    void glue(helper_, name) (void)                                     \
780
    {                                                                   \
781
        target_ulong new_fsr;                                           \
782
                                                                        \
783
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                     \
784
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) {   \
785
        case float_relation_unordered:                                  \
786
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                      \
787
            if ((env->fsr & FSR_NVM) || TRAP) {                         \
788
                env->fsr |= new_fsr;                                    \
789
                env->fsr |= FSR_NVC;                                    \
790
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
791
                raise_exception(TT_FP_EXCP);                            \
792
            } else {                                                    \
793
                env->fsr |= FSR_NVA;                                    \
794
            }                                                           \
795
            break;                                                      \
796
        case float_relation_less:                                       \
797
            new_fsr = FSR_FCC0 << FS;                                   \
798
            break;                                                      \
799
        case float_relation_greater:                                    \
800
            new_fsr = FSR_FCC1 << FS;                                   \
801
            break;                                                      \
802
        default:                                                        \
803
            new_fsr = 0;                                                \
804
            break;                                                      \
805
        }                                                               \
806
        env->fsr |= new_fsr;                                            \
807
    }
808
#define GEN_FCMPS(name, size, FS, TRAP)                                 \
809
    void glue(helper_, name)(float32 src1, float32 src2)                \
810
    {                                                                   \
811
        target_ulong new_fsr;                                           \
812
                                                                        \
813
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                     \
814
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) {   \
815
        case float_relation_unordered:                                  \
816
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                      \
817
            if ((env->fsr & FSR_NVM) || TRAP) {                         \
818
                env->fsr |= new_fsr;                                    \
819
                env->fsr |= FSR_NVC;                                    \
820
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
821
                raise_exception(TT_FP_EXCP);                            \
822
            } else {                                                    \
823
                env->fsr |= FSR_NVA;                                    \
824
            }                                                           \
825
            break;                                                      \
826
        case float_relation_less:                                       \
827
            new_fsr = FSR_FCC0 << FS;                                   \
828
            break;                                                      \
829
        case float_relation_greater:                                    \
830
            new_fsr = FSR_FCC1 << FS;                                   \
831
            break;                                                      \
832
        default:                                                        \
833
            new_fsr = 0;                                                \
834
            break;                                                      \
835
        }                                                               \
836
        env->fsr |= new_fsr;                                            \
837
    }
838

    
839
GEN_FCMPS(fcmps, float32, 0, 0);
840
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
841

    
842
GEN_FCMPS(fcmpes, float32, 0, 1);
843
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
844

    
845
GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
846
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
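
/*
 * Note on the macro parameters (derived from the instantiations in this
 * file): FS is the left shift applied to the FSR_FCC0/FSR_FCC1 bits, so
 * FS = 0 targets fcc0 while the FS = 22/24/26 instantiations further below
 * target the 64-bit fcc1, fcc2 and fcc3 fields; TRAP = 1 (the fcmpe*
 * variants) forces the trap on an unordered result even when the NV trap
 * is masked.
 */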
847

    
848
static uint32_t compute_all_flags(void)
849
{
850
    return env->psr & PSR_ICC;
851
}
852

    
853
static uint32_t compute_C_flags(void)
854
{
855
    return env->psr & PSR_CARRY;
856
}
857

    
858
static inline uint32_t get_NZ_icc(target_ulong dst)
859
{
860
    uint32_t ret = 0;
861

    
862
    if (!(dst & 0xffffffffULL))
863
        ret |= PSR_ZERO;
864
    if ((int32_t) (dst & 0xffffffffULL) < 0)
865
        ret |= PSR_NEG;
866
    return ret;
867
}
868

    
869
#ifdef TARGET_SPARC64
870
static uint32_t compute_all_flags_xcc(void)
871
{
872
    return env->xcc & PSR_ICC;
873
}
874

    
875
static uint32_t compute_C_flags_xcc(void)
876
{
877
    return env->xcc & PSR_CARRY;
878
}
879

    
880
static inline uint32_t get_NZ_xcc(target_ulong dst)
881
{
882
    uint32_t ret = 0;
883

    
884
    if (!dst)
885
        ret |= PSR_ZERO;
886
    if ((int64_t)dst < 0)
887
        ret |= PSR_NEG;
888
    return ret;
889
}
890
#endif
891

    
892
static inline uint32_t get_V_div_icc(target_ulong src2)
893
{
894
    uint32_t ret = 0;
895

    
896
    if (src2 != 0)
897
        ret |= PSR_OVF;
898
    return ret;
899
}
900

    
901
static uint32_t compute_all_div(void)
902
{
903
    uint32_t ret;
904

    
905
    ret = get_NZ_icc(CC_DST);
906
    ret |= get_V_div_icc(CC_SRC2);
907
    return ret;
908
}
909

    
910
static uint32_t compute_C_div(void)
911
{
912
    return 0;
913
}
914

    
915
/* carry = (src1[31] & src2[31]) | ( ~dst[31] & (src1[31] | src2[31])) */
916
static inline uint32_t get_C_add_icc(target_ulong dst, target_ulong src1,
917
                                     target_ulong src2)
918
{
919
    uint32_t ret = 0;
920

    
921
    if (((src1 & (1ULL << 31)) & (src2 & (1ULL << 31)))
922
        | ((~(dst & (1ULL << 31)))
923
           & ((src1 & (1ULL << 31)) | (src2 & (1ULL << 31)))))
924
        ret |= PSR_CARRY;
925
    return ret;
926
}
927

    
928
static inline uint32_t get_V_add_icc(target_ulong dst, target_ulong src1,
929
                                         target_ulong src2)
930
{
931
    uint32_t ret = 0;
932

    
933
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 31))
934
        ret |= PSR_OVF;
935
    return ret;
936
}
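
/*
 * Worked example of the overflow test above (illustrative): signed add
 * overflow means both operands have the same sign and the result has the
 * other one. For src1 = 0x7fffffff, src2 = 1, dst = 0x80000000:
 *
 *     src1 ^ src2 ^ -1 = 0x80000001   // bit 31 set: operands agree in sign
 *     src1 ^ dst       = 0xffffffff   // bit 31 set: result changed sign
 *
 * so bit 31 of the AND is set and PSR_OVF is raised.
 */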
937

    
938
#ifdef TARGET_SPARC64
939
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
940
{
941
    uint32_t ret = 0;
942

    
943
    if (dst < src1)
944
        ret |= PSR_CARRY;
945
    return ret;
946
}
947

    
948
static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
949
                                         target_ulong src2)
950
{
951
    uint32_t ret = 0;
952

    
953
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63))
954
        ret |= PSR_OVF;
955
    return ret;
956
}
957

    
958
static uint32_t compute_all_add_xcc(void)
959
{
960
    uint32_t ret;
961

    
962
    ret = get_NZ_xcc(CC_DST);
963
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
964
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
965
    return ret;
966
}
967

    
968
static uint32_t compute_C_add_xcc(void)
969
{
970
    return get_C_add_xcc(CC_DST, CC_SRC);
971
}
972
#endif
973

    
974
static uint32_t compute_all_add(void)
975
{
976
    uint32_t ret;
977

    
978
    ret = get_NZ_icc(CC_DST);
979
    ret |= get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
980
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
981
    return ret;
982
}
983

    
984
static uint32_t compute_C_add(void)
985
{
986
    return get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
987
}
988

    
989
#ifdef TARGET_SPARC64
990
static uint32_t compute_all_addx_xcc(void)
991
{
992
    uint32_t ret;
993

    
994
    ret = get_NZ_xcc(CC_DST);
995
    ret |= get_C_add_xcc(CC_DST - CC_SRC2, CC_SRC);
996
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
997
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
998
    return ret;
999
}
1000

    
1001
static uint32_t compute_C_addx_xcc(void)
1002
{
1003
    uint32_t ret;
1004

    
1005
    ret = get_C_add_xcc(CC_DST - CC_SRC2, CC_SRC);
1006
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
1007
    return ret;
1008
}
1009
#endif
1010

    
1011
static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
1012
{
1013
    uint32_t ret = 0;
1014

    
1015
    if ((src1 | src2) & 0x3)
1016
        ret |= PSR_OVF;
1017
    return ret;
1018
}
1019

    
1020
static uint32_t compute_all_tadd(void)
1021
{
1022
    uint32_t ret;
1023

    
1024
    ret = get_NZ_icc(CC_DST);
1025
    ret |= get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
1026
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1027
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1028
    return ret;
1029
}
1030

    
1031
static uint32_t compute_C_tadd(void)
1032
{
1033
    return get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
1034
}
1035

    
1036
static uint32_t compute_all_taddtv(void)
1037
{
1038
    uint32_t ret;
1039

    
1040
    ret = get_NZ_icc(CC_DST);
1041
    ret |= get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
1042
    return ret;
1043
}
1044

    
1045
static uint32_t compute_C_taddtv(void)
1046
{
1047
    return get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
1048
}
1049

    
1050
/* carry = (~src1[31] & src2[31]) | ( dst[31]  & (~src1[31] | src2[31])) */
1051
static inline uint32_t get_C_sub_icc(target_ulong dst, target_ulong src1,
1052
                                     target_ulong src2)
1053
{
1054
    uint32_t ret = 0;
1055

    
1056
    if (((~(src1 & (1ULL << 31))) & (src2 & (1ULL << 31)))
1057
        | ((dst & (1ULL << 31)) & (( ~(src1 & (1ULL << 31)))
1058
                                   | (src2 & (1ULL << 31)))))
1059
        ret |= PSR_CARRY;
1060
    return ret;
1061
}
1062

    
1063
static inline uint32_t get_V_sub_icc(target_ulong dst, target_ulong src1,
1064
                                     target_ulong src2)
1065
{
1066
    uint32_t ret = 0;
1067

    
1068
    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 31))
1069
        ret |= PSR_OVF;
1070
    return ret;
1071
}
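
/*
 * Worked example (illustrative): for subtraction, overflow means the
 * operands differ in sign and the result has the sign of src2. With
 * src1 = 0x80000000, src2 = 1, dst = 0x7fffffff:
 *
 *     src1 ^ src2 = 0x80000001   // operands differ in bit 31
 *     src1 ^ dst  = 0xffffffff   // result flipped sign
 *
 * so bit 31 of the AND is set and PSR_OVF is raised.
 */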
1072

    
1073

    
1074
#ifdef TARGET_SPARC64
1075
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
1076
{
1077
    uint32_t ret = 0;
1078

    
1079
    if (src1 < src2)
1080
        ret |= PSR_CARRY;
1081
    return ret;
1082
}
1083

    
1084
static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
1085
                                     target_ulong src2)
1086
{
1087
    uint32_t ret = 0;
1088

    
1089
    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63))
1090
        ret |= PSR_OVF;
1091
    return ret;
1092
}
1093

    
1094
static uint32_t compute_all_sub_xcc(void)
1095
{
1096
    uint32_t ret;
1097

    
1098
    ret = get_NZ_xcc(CC_DST);
1099
    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
1100
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1101
    return ret;
1102
}
1103

    
1104
static uint32_t compute_C_sub_xcc(void)
1105
{
1106
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
1107
}
1108
#endif
1109

    
1110
static uint32_t compute_all_sub(void)
1111
{
1112
    uint32_t ret;
1113

    
1114
    ret = get_NZ_icc(CC_DST);
1115
    ret |= get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1116
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1117
    return ret;
1118
}
1119

    
1120
static uint32_t compute_C_sub(void)
1121
{
1122
    return get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1123
}
1124

    
1125
#ifdef TARGET_SPARC64
1126
static uint32_t compute_all_subx_xcc(void)
1127
{
1128
    uint32_t ret;
1129

    
1130
    ret = get_NZ_xcc(CC_DST);
1131
    ret |= get_C_sub_xcc(CC_DST - CC_SRC2, CC_SRC);
1132
    ret |= get_C_sub_xcc(CC_DST, CC_SRC2);
1133
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1134
    return ret;
1135
}
1136

    
1137
static uint32_t compute_C_subx_xcc(void)
1138
{
1139
    uint32_t ret;
1140

    
1141
    ret = get_C_sub_xcc(CC_DST - CC_SRC2, CC_SRC);
1142
    ret |= get_C_sub_xcc(CC_DST, CC_SRC2);
1143
    return ret;
1144
}
1145
#endif
1146

    
1147
static uint32_t compute_all_tsub(void)
1148
{
1149
    uint32_t ret;
1150

    
1151
    ret = get_NZ_icc(CC_DST);
1152
    ret |= get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1153
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1154
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1155
    return ret;
1156
}
1157

    
1158
static uint32_t compute_C_tsub(void)
1159
{
1160
    return get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1161
}
1162

    
1163
static uint32_t compute_all_tsubtv(void)
1164
{
1165
    uint32_t ret;
1166

    
1167
    ret = get_NZ_icc(CC_DST);
1168
    ret |= get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1169
    return ret;
1170
}
1171

    
1172
static uint32_t compute_C_tsubtv(void)
1173
{
1174
    return get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1175
}
1176

    
1177
static uint32_t compute_all_logic(void)
1178
{
1179
    return get_NZ_icc(CC_DST);
1180
}
1181

    
1182
static uint32_t compute_C_logic(void)
1183
{
1184
    return 0;
1185
}
1186

    
1187
#ifdef TARGET_SPARC64
1188
static uint32_t compute_all_logic_xcc(void)
1189
{
1190
    return get_NZ_xcc(CC_DST);
1191
}
1192
#endif
1193

    
1194
typedef struct CCTable {
1195
    uint32_t (*compute_all)(void); /* return all the flags */
1196
    uint32_t (*compute_c)(void);  /* return the C flag */
1197
} CCTable;
1198

    
1199
static const CCTable icc_table[CC_OP_NB] = {
1200
    /* CC_OP_DYNAMIC should never happen */
1201
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
1202
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
1203
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
1204
    [CC_OP_ADDX] = { compute_all_add, compute_C_add },
1205
    [CC_OP_TADD] = { compute_all_tadd, compute_C_tadd },
1206
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_taddtv },
1207
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
1208
    [CC_OP_SUBX] = { compute_all_sub, compute_C_sub },
1209
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_tsub },
1210
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_tsubtv },
1211
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
1212
};
1213

    
1214
#ifdef TARGET_SPARC64
1215
static const CCTable xcc_table[CC_OP_NB] = {
1216
    /* CC_OP_DYNAMIC should never happen */
1217
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
1218
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
1219
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
1220
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
1221
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
1222
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
1223
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1224
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
1225
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1226
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
1227
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
1228
};
1229
#endif
1230

    
1231
void helper_compute_psr(void)
1232
{
1233
    uint32_t new_psr;
1234

    
1235
    new_psr = icc_table[CC_OP].compute_all();
1236
    env->psr = new_psr;
1237
#ifdef TARGET_SPARC64
1238
    new_psr = xcc_table[CC_OP].compute_all();
1239
    env->xcc = new_psr;
1240
#endif
1241
    CC_OP = CC_OP_FLAGS;
1242
}
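
/*
 * Sketch of the lazy flag scheme encoded by the tables above (the example
 * sequence is illustrative, not taken from this file): only the operation
 * kind is recorded in CC_OP together with CC_SRC/CC_SRC2/CC_DST, and the
 * PSR/XCC bits are materialised on demand, e.g.
 *
 *     CC_OP = CC_OP_ADD;        // recorded after an addcc
 *     ...
 *     helper_compute_psr();     // icc_table[CC_OP_ADD].compute_all()
 *     // env->psr now holds N/Z/V/C and CC_OP falls back to CC_OP_FLAGS
 *
 * so the flags are only computed when a branch, trap or flag read actually
 * needs them.
 */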
1243

    
1244
uint32_t helper_compute_C_icc(void)
1245
{
1246
    uint32_t ret;
1247

    
1248
    ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
1249
    return ret;
1250
}
1251

    
1252
#ifdef TARGET_SPARC64
1253
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
1254
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
1255
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
1256

    
1257
GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
1258
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
1259
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
1260

    
1261
GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
1262
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
1263
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
1264

    
1265
GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
1266
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
1267
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
1268

    
1269
GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
1270
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
1271
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
1272

    
1273
GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
1274
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
1275
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
1276
#endif
1277
#undef GEN_FCMPS
1278

    
1279
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
1280
    defined(DEBUG_MXCC)
1281
static void dump_mxcc(CPUState *env)
1282
{
1283
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1284
           "\n",
1285
           env->mxccdata[0], env->mxccdata[1],
1286
           env->mxccdata[2], env->mxccdata[3]);
1287
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1288
           "\n"
1289
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1290
           "\n",
1291
           env->mxccregs[0], env->mxccregs[1],
1292
           env->mxccregs[2], env->mxccregs[3],
1293
           env->mxccregs[4], env->mxccregs[5],
1294
           env->mxccregs[6], env->mxccregs[7]);
1295
}
1296
#endif
1297

    
1298
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
1299
    && defined(DEBUG_ASI)
1300
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
1301
                     uint64_t r1)
1302
{
1303
    switch (size)
1304
    {
1305
    case 1:
1306
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
1307
                    addr, asi, r1 & 0xff);
1308
        break;
1309
    case 2:
1310
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
1311
                    addr, asi, r1 & 0xffff);
1312
        break;
1313
    case 4:
1314
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
1315
                    addr, asi, r1 & 0xffffffff);
1316
        break;
1317
    case 8:
1318
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
1319
                    addr, asi, r1);
1320
        break;
1321
    }
1322
}
1323
#endif
1324

    
1325
#ifndef TARGET_SPARC64
1326
#ifndef CONFIG_USER_ONLY
1327
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1328
{
1329
    uint64_t ret = 0;
1330
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
1331
    uint32_t last_addr = addr;
1332
#endif
1333

    
1334
    helper_check_align(addr, size - 1);
1335
    switch (asi) {
1336
    case 2: /* SuperSparc MXCC registers */
1337
        switch (addr) {
1338
        case 0x01c00a00: /* MXCC control register */
1339
            if (size == 8)
1340
                ret = env->mxccregs[3];
1341
            else
1342
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1343
                             size);
1344
            break;
1345
        case 0x01c00a04: /* MXCC control register */
1346
            if (size == 4)
1347
                ret = env->mxccregs[3];
1348
            else
1349
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1350
                             size);
1351
            break;
1352
        case 0x01c00c00: /* Module reset register */
1353
            if (size == 8) {
1354
                ret = env->mxccregs[5];
1355
                // should we do something here?
1356
            } else
1357
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1358
                             size);
1359
            break;
1360
        case 0x01c00f00: /* MBus port address register */
1361
            if (size == 8)
1362
                ret = env->mxccregs[7];
1363
            else
1364
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1365
                             size);
1366
            break;
1367
        default:
1368
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1369
                         size);
1370
            break;
1371
        }
1372
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
1373
                     "addr = %08x -> ret = %" PRIx64 ","
1374
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
1375
#ifdef DEBUG_MXCC
1376
        dump_mxcc(env);
1377
#endif
1378
        break;
1379
    case 3: /* MMU probe */
1380
        {
1381
            int mmulev;
1382

    
1383
            mmulev = (addr >> 8) & 15;
1384
            if (mmulev > 4)
1385
                ret = 0;
1386
            else
1387
                ret = mmu_probe(env, addr, mmulev);
1388
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
1389
                        addr, mmulev, ret);
1390
        }
1391
        break;
1392
    case 4: /* read MMU regs */
1393
        {
1394
            int reg = (addr >> 8) & 0x1f;
1395

    
1396
            ret = env->mmuregs[reg];
1397
            if (reg == 3) /* Fault status cleared on read */
1398
                env->mmuregs[3] = 0;
1399
            else if (reg == 0x13) /* Fault status read */
1400
                ret = env->mmuregs[3];
1401
            else if (reg == 0x14) /* Fault address read */
1402
                ret = env->mmuregs[4];
1403
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
1404
        }
1405
        break;
1406
    case 5: // Turbosparc ITLB Diagnostic
1407
    case 6: // Turbosparc DTLB Diagnostic
1408
    case 7: // Turbosparc IOTLB Diagnostic
1409
        break;
1410
    case 9: /* Supervisor code access */
1411
        switch(size) {
1412
        case 1:
1413
            ret = ldub_code(addr);
1414
            break;
1415
        case 2:
1416
            ret = lduw_code(addr);
1417
            break;
1418
        default:
1419
        case 4:
1420
            ret = ldl_code(addr);
1421
            break;
1422
        case 8:
1423
            ret = ldq_code(addr);
1424
            break;
1425
        }
1426
        break;
1427
    case 0xa: /* User data access */
1428
        switch(size) {
1429
        case 1:
1430
            ret = ldub_user(addr);
1431
            break;
1432
        case 2:
1433
            ret = lduw_user(addr);
1434
            break;
1435
        default:
1436
        case 4:
1437
            ret = ldl_user(addr);
1438
            break;
1439
        case 8:
1440
            ret = ldq_user(addr);
1441
            break;
1442
        }
1443
        break;
1444
    case 0xb: /* Supervisor data access */
1445
        switch(size) {
1446
        case 1:
1447
            ret = ldub_kernel(addr);
1448
            break;
1449
        case 2:
1450
            ret = lduw_kernel(addr);
1451
            break;
1452
        default:
1453
        case 4:
1454
            ret = ldl_kernel(addr);
1455
            break;
1456
        case 8:
1457
            ret = ldq_kernel(addr);
1458
            break;
1459
        }
1460
        break;
1461
    case 0xc: /* I-cache tag */
1462
    case 0xd: /* I-cache data */
1463
    case 0xe: /* D-cache tag */
1464
    case 0xf: /* D-cache data */
1465
        break;
1466
    case 0x20: /* MMU passthrough */
1467
        switch(size) {
1468
        case 1:
1469
            ret = ldub_phys(addr);
1470
            break;
1471
        case 2:
1472
            ret = lduw_phys(addr);
1473
            break;
1474
        default:
1475
        case 4:
1476
            ret = ldl_phys(addr);
1477
            break;
1478
        case 8:
1479
            ret = ldq_phys(addr);
1480
            break;
1481
        }
1482
        break;
1483
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1484
        switch(size) {
1485
        case 1:
1486
            ret = ldub_phys((target_phys_addr_t)addr
1487
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1488
            break;
1489
        case 2:
1490
            ret = lduw_phys((target_phys_addr_t)addr
1491
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1492
            break;
1493
        default:
1494
        case 4:
1495
            ret = ldl_phys((target_phys_addr_t)addr
1496
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1497
            break;
1498
        case 8:
1499
            ret = ldq_phys((target_phys_addr_t)addr
1500
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1501
            break;
1502
        }
1503
        break;
1504
    case 0x30: // Turbosparc secondary cache diagnostic
1505
    case 0x31: // Turbosparc RAM snoop
1506
    case 0x32: // Turbosparc page table descriptor diagnostic
1507
    case 0x39: /* data cache diagnostic register */
1508
        ret = 0;
1509
        break;
1510
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
1511
        {
1512
            int reg = (addr >> 8) & 3;
1513

    
1514
            switch(reg) {
1515
            case 0: /* Breakpoint Value (Addr) */
1516
                ret = env->mmubpregs[reg];
1517
                break;
1518
            case 1: /* Breakpoint Mask */
1519
                ret = env->mmubpregs[reg];
1520
                break;
1521
            case 2: /* Breakpoint Control */
1522
                ret = env->mmubpregs[reg];
1523
                break;
1524
            case 3: /* Breakpoint Status */
1525
                ret = env->mmubpregs[reg];
1526
                env->mmubpregs[reg] = 0ULL;
1527
                break;
1528
            }
1529
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
1530
                        ret);
1531
        }
1532
        break;
1533
    case 8: /* User code access, XXX */
1534
    default:
1535
        do_unassigned_access(addr, 0, 0, asi, size);
1536
        ret = 0;
1537
        break;
1538
    }
1539
    if (sign) {
1540
        switch(size) {
1541
        case 1:
1542
            ret = (int8_t) ret;
1543
            break;
1544
        case 2:
1545
            ret = (int16_t) ret;
1546
            break;
1547
        case 4:
1548
            ret = (int32_t) ret;
1549
            break;
1550
        default:
1551
            break;
1552
        }
1553
    }
1554
#ifdef DEBUG_ASI
1555
    dump_asi("read ", last_addr, asi, size, ret);
1556
#endif
1557
    return ret;
1558
}
1559

    
1560
void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1561
{
1562
    helper_check_align(addr, size - 1);
1563
    switch(asi) {
1564
    case 2: /* SuperSparc MXCC registers */
1565
        switch (addr) {
1566
        case 0x01c00000: /* MXCC stream data register 0 */
1567
            if (size == 8)
1568
                env->mxccdata[0] = val;
1569
            else
1570
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1571
                             size);
1572
            break;
1573
        case 0x01c00008: /* MXCC stream data register 1 */
1574
            if (size == 8)
1575
                env->mxccdata[1] = val;
1576
            else
1577
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1578
                             size);
1579
            break;
1580
        case 0x01c00010: /* MXCC stream data register 2 */
1581
            if (size == 8)
1582
                env->mxccdata[2] = val;
1583
            else
1584
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1585
                             size);
1586
            break;
1587
        case 0x01c00018: /* MXCC stream data register 3 */
            if (size == 8)
                env->mxccdata[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00100: /* MXCC stream source */
            if (size == 8)
                env->mxccregs[0] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        0);
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        8);
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        16);
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        24);
            break;
        case 0x01c00200: /* MXCC stream destination */
            if (size == 8)
                env->mxccregs[1] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  0,
                     env->mxccdata[0]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  8,
                     env->mxccdata[1]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
                     env->mxccdata[2]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
                     env->mxccdata[3]);
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                env->mxccregs[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4)
                env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
                    | val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00e00: /* MXCC error register  */
            // writing a 1 bit clears the error
            if (size == 8)
                env->mxccregs[6] &= ~val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                env->mxccregs[7] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
                     asi, size, addr, val);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case 3: /* MMU flush */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            DPRINTF_MMU("mmu flush level %d\n", mmulev);
            switch (mmulev) {
            case 0: // flush page
                tlb_flush_page(env, addr & 0xfffff000);
                break;
            case 1: // flush segment (256k)
            case 2: // flush region (16M)
            case 3: // flush context (4G)
            case 4: // flush entire
                tlb_flush(env, 1);
                break;
            default:
                break;
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
        }
        break;
    case 4: /* write MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;
            uint32_t oldreg;

            oldreg = env->mmuregs[reg];
            switch(reg) {
            case 0: // Control Register
                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
                                    (val & 0x00ffffff);
                // Mappings generated during no-fault mode or MMU
                // disabled mode are invalid in normal mode
                if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
                    (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
                    tlb_flush(env, 1);
                break;
            case 1: // Context Table Pointer Register
                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
                break;
            case 2: // Context Register
                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
                if (oldreg != env->mmuregs[reg]) {
                    /* we flush when the MMU context changes because
                       QEMU has no MMU context support */
                    tlb_flush(env, 1);
                }
                break;
            case 3: // Synchronous Fault Status Register with Clear
            case 4: // Synchronous Fault Address Register
                break;
            case 0x10: // TLB Replacement Control Register
                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
                break;
            case 0x13: // Synchronous Fault Status Register with Read and Clear
                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
                break;
            case 0x14: // Synchronous Fault Address Register
                env->mmuregs[4] = val;
                break;
            default:
                env->mmuregs[reg] = val;
                break;
            }
            if (oldreg != env->mmuregs[reg]) {
                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
                            reg, oldreg, env->mmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
        }
        break;
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            stb_user(addr, val);
            break;
        case 2:
            stw_user(addr, val);
            break;
        default:
        case 4:
            stl_user(addr, val);
            break;
        case 8:
            stq_user(addr, val);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            stb_kernel(addr, val);
            break;
        case 2:
            stw_kernel(addr, val);
            break;
        default:
        case 4:
            stl_kernel(addr, val);
            break;
        case 8:
            stq_kernel(addr, val);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
    case 0x10: /* I/D-cache flush page */
    case 0x11: /* I/D-cache flush segment */
    case 0x12: /* I/D-cache flush region */
    case 0x13: /* I/D-cache flush context */
    case 0x14: /* I/D-cache flush user */
        break;
    case 0x17: /* Block copy, sta access */
        {
            // val = src
            // addr = dst
            // copy 32 bytes
            unsigned int i;
            uint32_t src = val & ~3, dst = addr & ~3, temp;

            for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
                temp = ldl_kernel(src);
                stl_kernel(dst, temp);
            }
        }
        break;
    case 0x1f: /* Block fill, stda access */
        {
            // addr = dst
            // fill 32 bytes with val
            unsigned int i;
            uint32_t dst = addr & 7;

            for (i = 0; i < 32; i += 8, dst += 8)
                stq_kernel(dst, val);
        }
        break;
    case 0x20: /* MMU passthrough */
        {
            switch(size) {
            case 1:
                stb_phys(addr, val);
                break;
            case 2:
                stw_phys(addr, val);
                break;
            case 4:
            default:
                stl_phys(addr, val);
                break;
            case 8:
                stq_phys(addr, val);
                break;
            }
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        {
            switch(size) {
            case 1:
                stb_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 2:
                stw_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 4:
            default:
                stl_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 8:
                stq_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            }
        }
        break;
    case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
    case 0x31: // store buffer data, Ross RT620 I-cache flush or
               // Turbosparc snoop RAM
    case 0x32: // store buffer control or Turbosparc page table
               // descriptor diagnostic
    case 0x36: /* I-cache flash clear */
    case 0x37: /* D-cache flash clear */
    case 0x4c: /* breakpoint action */
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
        {
            int reg = (addr >> 8) & 3;

            switch(reg) {
            case 0: /* Breakpoint Value (Addr) */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 1: /* Breakpoint Mask */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 2: /* Breakpoint Control */
                env->mmubpregs[reg] = (val & 0x7fULL);
                break;
            case 3: /* Breakpoint Status */
                env->mmubpregs[reg] = (val & 0xfULL);
                break;
            }
            DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
                        env->mmuregs[reg]);
        }
        break;
    case 8: /* User code access, XXX */
    case 9: /* Supervisor code access, XXX */
    default:
        do_unassigned_access(addr, 1, 0, asi, size);
        break;
    }
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
}

#endif /* CONFIG_USER_ONLY */
#else /* TARGET_SPARC64 */

#ifdef CONFIG_USER_ONLY
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    address_mask(env, &addr);

    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x80: // Primary
    case 0x88: // Primary LE
        {
            switch(size) {
            case 1:
                ret = ldub_raw(addr);
                break;
            case 2:
                ret = lduw_raw(addr);
                break;
            case 4:
                ret = ldl_raw(addr);
                break;
            default:
            case 8:
                ret = ldq_raw(addr);
                break;
            }
        }
        break;
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        break;
    default:
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}

void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    address_mask(env, &addr);

    /* Convert to little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
    default:
        break;
    }

    switch(asi) {
    case 0x80: // Primary
    case 0x88: // Primary LE
        {
            switch(size) {
            case 1:
                stb_raw(addr, val);
                break;
            case 2:
                stw_raw(addr, val);
                break;
            case 4:
                stl_raw(addr, val);
                break;
            case 8:
            default:
                stq_raw(addr, val);
                break;
            }
        }
        break;
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        return;

    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}

#else /* CONFIG_USER_ONLY */

uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    asi &= 0xff;

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x10: // As if user primary
    case 0x18: // As if user primary LE
    case 0x80: // Primary
    case 0x88: // Primary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if ((env->def->features & CPU_FEATURE_HYPV)
                && env->hpstate & HS_PRIV) {
                switch(size) {
                case 1:
                    ret = ldub_hypv(addr);
                    break;
                case 2:
                    ret = lduw_hypv(addr);
                    break;
                case 4:
                    ret = ldl_hypv(addr);
                    break;
                default:
                case 8:
                    ret = ldq_hypv(addr);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    ret = ldub_kernel(addr);
                    break;
                case 2:
                    ret = lduw_kernel(addr);
                    break;
                case 4:
                    ret = ldl_kernel(addr);
                    break;
                default:
                case 8:
                    ret = ldq_kernel(addr);
                    break;
                }
            }
        } else {
            switch(size) {
            case 1:
                ret = ldub_user(addr);
                break;
            case 2:
                ret = lduw_user(addr);
                break;
            case 4:
                ret = ldl_user(addr);
                break;
            default:
            case 8:
                ret = ldq_user(addr);
                break;
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        {
            switch(size) {
            case 1:
                ret = ldub_phys(addr);
                break;
            case 2:
                ret = lduw_phys(addr);
                break;
            case 4:
                ret = ldl_phys(addr);
                break;
            default:
            case 8:
                ret = ldq_phys(addr);
                break;
            }
            break;
        }
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        //  Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return 0;
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x11: // As if user secondary
    case 0x19: // As if user secondary LE
    case 0x4a: // UPA config
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        break;
    case 0x45: // LSU
        ret = env->lsu;
        break;
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // I-TSB Tag Target register
                ret = ultrasparc_tag_target(env->immu.tag_access);
            } else {
                ret = env->immuregs[reg];
            }

            break;
        }
    case 0x51: // I-MMU 8k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         8*1024);
            break;
        }
    case 0x52: // I-MMU 64k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         64*1024);
            break;
        }
    case 0x55: // I-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tte;
            break;
        }
    case 0x56: // I-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tag;
            break;
        }
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // D-TSB Tag Target register
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
            } else {
                ret = env->dmmuregs[reg];
            }
            break;
        }
    case 0x59: // D-MMU 8k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         8*1024);
            break;
        }
    case 0x5a: // D-MMU 64k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         64*1024);
            break;
        }
    case 0x5d: // D-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tte;
            break;
        }
    case 0x5e: // D-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tag;
            break;
        }
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        break;
    case 0x5b: // D-MMU data pointer
    case 0x48: // Interrupt dispatch, RO
    case 0x49: // Interrupt data receive
    case 0x7f: // Incoming interrupt vector, RO
        // XXX
        break;
    case 0x54: // I-MMU data in, WO
    case 0x57: // I-MMU demap, WO
    case 0x5c: // D-MMU data in, WO
    case 0x5f: // D-MMU demap, WO
    case 0x77: // Interrupt vector, WO
    default:
        do_unassigned_access(addr, 0, 0, 1, size);
        ret = 0;
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}

void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif

    asi &= 0xff;

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    /* Convert to little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
    default:
        break;
    }

    switch(asi) {
    case 0x10: // As if user primary
    case 0x18: // As if user primary LE
    case 0x80: // Primary
    case 0x88: // Primary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if ((env->def->features & CPU_FEATURE_HYPV)
                && env->hpstate & HS_PRIV) {
                switch(size) {
                case 1:
                    stb_hypv(addr, val);
                    break;
                case 2:
                    stw_hypv(addr, val);
                    break;
                case 4:
                    stl_hypv(addr, val);
                    break;
                case 8:
                default:
                    stq_hypv(addr, val);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    stb_kernel(addr, val);
                    break;
                case 2:
                    stw_kernel(addr, val);
                    break;
                case 4:
                    stl_kernel(addr, val);
                    break;
                case 8:
                default:
                    stq_kernel(addr, val);
                    break;
                }
            }
        } else {
            switch(size) {
            case 1:
                stb_user(addr, val);
                break;
            case 2:
                stw_user(addr, val);
                break;
            case 4:
                stl_user(addr, val);
                break;
            case 8:
            default:
                stq_user(addr, val);
                break;
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        {
            switch(size) {
            case 1:
                stb_phys(addr, val);
                break;
            case 2:
                stw_phys(addr, val);
                break;
            case 4:
                stl_phys(addr, val);
                break;
            case 8:
            default:
                stq_phys(addr, val);
                break;
            }
        }
        return;
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        //  Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x11: // As if user secondary
    case 0x19: // As if user secondary LE
    case 0x4a: // UPA config
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        return;
    case 0x45: // LSU
        {
            uint64_t oldreg;

            oldreg = env->lsu;
            env->lsu = val & (DMMU_E | IMMU_E);
            // Mappings generated during D/I MMU disabled mode are
            // invalid in normal mode
            if (oldreg != env->lsu) {
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
                            oldreg, env->lsu);
#ifdef DEBUG_MMU
                dump_mmu(env);
#endif
                tlb_flush(env, 1);
            }
            return;
        }
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->immuregs[reg];
            switch(reg) {
            case 0: // RO
                return;
            case 1: // Not in I-MMU
            case 2:
                return;
            case 3: // SFSR
                if ((val & 1) == 0)
                    val = 0; // Clear SFSR
                env->immu.sfsr = val;
                break;
            case 4: // RO
                return;
            case 5: // TSB access
                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->immu.tsb, val);
                env->immu.tsb = val;
                break;
            case 6: // Tag access
                env->immu.tag_access = val;
                break;
            case 7:
            case 8:
                return;
            default:
                break;
            }

            if (oldreg != env->immuregs[reg]) {
                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
            return;
        }
    case 0x54: // I-MMU data in
        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
        return;
    case 0x55: // I-MMU data access
        {
            // TODO: auto demap

            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
            dump_mmu(env);
#endif
            return;
        }
    case 0x57: // I-MMU demap
        demap_tlb(env->itlb, val, "immu", env);
        return;
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->dmmuregs[reg];
            switch(reg) {
            case 0: // RO
            case 4:
                return;
            case 3: // SFSR
                if ((val & 1) == 0) {
                    val = 0; // Clear SFSR, Fault address
                    env->dmmu.sfar = 0;
                }
                env->dmmu.sfsr = val;
                break;
            case 1: // Primary context
                env->dmmu.mmu_primary_context = val;
                break;
            case 2: // Secondary context
                env->dmmu.mmu_secondary_context = val;
                break;
            case 5: // TSB access
                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->dmmu.tsb, val);
                env->dmmu.tsb = val;
                break;
            case 6: // Tag access
                env->dmmu.tag_access = val;
                break;
            case 7: // Virtual Watchpoint
            case 8: // Physical Watchpoint
            default:
                env->dmmuregs[reg] = val;
                break;
            }

            if (oldreg != env->dmmuregs[reg]) {
                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
            return;
        }
    case 0x5c: // D-MMU data in
        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
        return;
    case 0x5d: // D-MMU data access
        {
            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
            dump_mmu(env);
#endif
            return;
        }
    case 0x5f: // D-MMU demap
        demap_tlb(env->dtlb, val, "dmmu", env);
        return;
    case 0x49: // Interrupt data receive
        // XXX
        return;
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        return;
    case 0x51: // I-MMU 8k TSB pointer, RO
    case 0x52: // I-MMU 64k TSB pointer, RO
    case 0x56: // I-MMU tag read, RO
    case 0x59: // D-MMU 8k TSB pointer, RO
    case 0x5a: // D-MMU 64k TSB pointer, RO
    case 0x5b: // D-MMU data pointer, RO
    case 0x5e: // D-MMU tag read, RO
    case 0x48: // Interrupt dispatch, RO
    case 0x7f: // Incoming interrupt vector, RO
    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}
#endif /* CONFIG_USER_ONLY */

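/* ldda with the Nucleus quad-LDD ASIs (0x24/0x2c) moves an aligned 16-byte
   pair into rd/rd+1 in one go; when rd is %g0, only the second doubleword
   is kept because %g0 always reads as zero. Other ASIs fall back to two
   32-bit helper_ld_asi accesses. */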
void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    switch (asi) {
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        helper_check_align(addr, 0xf);
        if (rd == 0) {
            env->gregs[1] = ldq_kernel(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_kernel(addr);
            env->gregs[rd + 1] = ldq_kernel(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            env->regwptr[rd] = ldq_kernel(addr);
            env->regwptr[rd + 1] = ldq_kernel(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
    default:
        helper_check_align(addr, 0x3);
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}

void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xf0: // Block load primary
    case 0xf1: // Block load secondary
    case 0xf8: // Block load primary LE
    case 0xf9: // Block load secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    val = helper_ld_asi(addr, asi, size, 0);
    switch(size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = val;
        break;
    case 8:
        *((int64_t *)&DT0) = val;
        break;
    case 16:
        // XXX
        break;
    }
}

void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xe0: // UA2007 Block commit store primary (cache flush)
    case 0xe1: // UA2007 Block commit store secondary (cache flush)
    case 0xf0: // Block store primary
    case 0xf1: // Block store secondary
    case 0xf8: // Block store primary LE
    case 0xf9: // Block store secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        val = *((uint32_t *)&env->fpr[rd]);
        break;
    case 8:
        val = *((int64_t *)&DT0);
        break;
    case 16:
        // XXX
        break;
    }
    helper_st_asi(addr, val, asi, size);
}

target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
                            target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    val2 &= 0xffffffffUL;
    ret = helper_ld_asi(addr, asi, 4, 0);
    ret &= 0xffffffffUL;
    if (val2 == ret)
        helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
    return ret;
}

target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
                             target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    ret = helper_ld_asi(addr, asi, 8, 0);
    if (val2 == ret)
        helper_st_asi(addr, val1, asi, 8);
    return ret;
}
#endif /* TARGET_SPARC64 */

#ifndef TARGET_SPARC64
void helper_rett(void)
{
    unsigned int cwp;

    if (env->psret == 1)
        raise_exception(TT_ILL_INSN);

    env->psret = 1;
    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
    env->psrs = env->psrps;
}
#endif

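/* V8 integer division: the dividend is the 64-bit concatenation of Y and
   the low 32 bits of rs1, and the quotient saturates to 32 bits; cc_src2
   records whether the result overflowed, for use by the CC-setting
   variants of the instruction. */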
target_ulong helper_udiv(target_ulong a, target_ulong b)
{
    uint64_t x0;
    uint32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = b;

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if (x0 > 0xffffffff) {
        env->cc_src2 = 1;
        return 0xffffffff;
    } else {
        env->cc_src2 = 0;
        return x0;
    }
}

target_ulong helper_sdiv(target_ulong a, target_ulong b)
{
    int64_t x0;
    int32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = b;

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if ((int32_t) x0 != x0) {
        env->cc_src2 = 1;
        return x0 < 0? 0x80000000: 0x7fffffff;
    } else {
        env->cc_src2 = 0;
        return x0;
    }
}

void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        stfq_user(addr, DT0);
        break;
    case 1:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case 2:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        break;
    }
#else
    address_mask(env, &addr);
    stfq_raw(addr, DT0);
#endif
}

void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        DT0 = ldfq_user(addr);
        break;
    case 1:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case 2:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        break;
    }
#else
    address_mask(env, &addr);
    DT0 = ldfq_raw(addr);
#endif
}

void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case 1:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        break;
    }
#else
    address_mask(env, &addr);
    u.ll.upper = ldq_raw(addr);
    u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
    QT0 = u.q;
#endif
}

void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case 1:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        break;
    }
#else
    u.q = QT0;
    address_mask(env, &addr);
    stq_raw(addr, u.ll.upper);
    stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
#endif
}

static inline void set_fsr(void)
{
    int rnd_mode;

    switch (env->fsr & FSR_RD_MASK) {
    case FSR_RD_NEAREST:
        rnd_mode = float_round_nearest_even;
        break;
    default:
    case FSR_RD_ZERO:
        rnd_mode = float_round_to_zero;
        break;
    case FSR_RD_POS:
        rnd_mode = float_round_up;
        break;
    case FSR_RD_NEG:
        rnd_mode = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_mode, &env->fp_status);
}

void helper_ldfsr(uint32_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
    set_fsr();
}

#ifdef TARGET_SPARC64
void helper_ldxfsr(uint64_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
    set_fsr();
}
#endif

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

#ifndef TARGET_SPARC64
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_OVF);
    }
    set_cwp(cwp);
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
}

void helper_wrpsr(target_ulong new_psr)
{
    if ((new_psr & PSR_CWP) >= env->nwindows)
        raise_exception(TT_ILL_INSN);
    else
        PUT_PSR(env, new_psr);
}

target_ulong helper_rdpsr(void)
{
    return GET_PSR(env);
}

#else
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->cansave == 0) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            // XXX Clean windows without trap
            raise_exception(TT_CLRWIN);
        } else {
            env->cansave--;
            env->canrestore++;
            set_cwp(cwp);
        }
    }
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->canrestore == 0) {
        raise_exception(TT_FILL | (env->otherwin != 0 ?
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                   ((env->wstate & 0x7) << 2)));
    } else {
        env->cansave++;
        env->canrestore--;
        set_cwp(cwp);
    }
}

void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}

void helper_saved(void)
{
    env->cansave++;
    if (env->otherwin == 0)
        env->canrestore--;
    else
        env->otherwin--;
}

void helper_restored(void)
{
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    if (env->otherwin == 0)
        env->cansave--;
    else
        env->otherwin--;
}

target_ulong helper_rdccr(void)
{
    return GET_CCR(env);
}

void helper_wrccr(target_ulong new_ccr)
{
    PUT_CCR(env, new_ccr);
}

// CWP handling is reversed in V9, but we still use the V8 register
// order.
target_ulong helper_rdcwp(void)
{
    return GET_CWP64(env);
}

void helper_wrcwp(target_ulong new_cwp)
{
    PUT_CWP64(env, new_cwp);
}

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))

target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}

target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
{
    uint64_t tmp;

    tmp = addr + offset;
    env->gsr &= ~7ULL;
    env->gsr |= tmp & 7ULL;
    return tmp & ~7ULL;
}

target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}

static inline uint64_t *get_gregset(uint32_t pstate)
{
    switch (pstate) {
    default:
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}

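/* Changing PSTATE.AG/IG/MG selects a different set of global registers;
   change_pstate() below spills the currently visible globals back into
   the bank they came from and loads the bank selected by the new state. */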
static inline void change_pstate(uint32_t new_pstate)
{
    uint32_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    if (env->def->features & CPU_FEATURE_GL) {
        // PS_AG is not implemented in this case
        new_pstate &= ~PS_AG;
    }

    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;

    if (new_pstate_regs != pstate_regs) {
        // Switch global register bank
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    }
    env->pstate = new_pstate;
}

void helper_wrpstate(target_ulong new_state)
{
    change_pstate(new_state & 0xf3f);
}

void helper_done(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tnpc;
    env->npc = tsptr->tnpc + 4;
    PUT_CCR(env, tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, tsptr->tstate & 0xff);
    env->tl--;
}

void helper_retry(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tpc;
    env->npc = tsptr->tnpc;
    PUT_CCR(env, tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, tsptr->tstate & 0xff);
    env->tl--;
}

void helper_set_softint(uint64_t value)
{
    env->softint |= (uint32_t)value;
}

void helper_clear_softint(uint64_t value)
{
    env->softint &= (uint32_t)~value;
}

void helper_write_softint(uint64_t value)
{
    env->softint = (uint32_t)value;
}
#endif

void helper_flush(target_ulong addr)
{
    addr &= ~7;
    tb_invalidate_page_range(addr, addr + 8);
}

#ifdef TARGET_SPARC64
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_TMISS] = "Instruction Access MMU Miss",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_TOVF] = "Tag Overflow",
    [TT_CLRWIN] = "Clean Windows",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_DFAULT] = "Data Access Fault",
    [TT_DMISS] = "Data Access MMU Miss",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DPROT] = "Data Protection Error",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_PRIV_ACT] = "Privileged Action",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
};
#endif

trap_state* cpu_tsptr(CPUState* env)
{
    return &env->ts[env->tl & MAXTL_MASK];
}

void do_interrupt(CPUState *env)
3377
{
3378
    int intno = env->exception_index;
3379
    trap_state* tsptr;
3380

    
3381
#ifdef DEBUG_PCALL
3382
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
3383
        static int count;
3384
        const char *name;
3385

    
3386
        if (intno < 0 || intno >= 0x180)
3387
            name = "Unknown";
3388
        else if (intno >= 0x100)
3389
            name = "Trap Instruction";
3390
        else if (intno >= 0xc0)
3391
            name = "Window Fill";
3392
        else if (intno >= 0x80)
3393
            name = "Window Spill";
3394
        else {
3395
            name = excp_names[intno];
3396
            if (!name)
3397
                name = "Unknown";
3398
        }
3399

    
3400
        qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
3401
                " SP=%016" PRIx64 "\n",
3402
                count, name, intno,
3403
                env->pc,
3404
                env->npc, env->regwptr[6]);
3405
        log_cpu_state(env, 0);
3406
#if 0
3407
        {
3408
            int i;
3409
            uint8_t *ptr;
3410

3411
            qemu_log("       code=");
3412
            ptr = (uint8_t *)env->pc;
3413
            for(i = 0; i < 16; i++) {
3414
                qemu_log(" %02x", ldub(ptr + i));
3415
            }
3416
            qemu_log("\n");
3417
        }
3418
#endif
3419
        count++;
3420
    }
3421
#endif
3422
#if !defined(CONFIG_USER_ONLY)
3423
    if (env->tl >= env->maxtl) {
3424
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
3425
                  " Error state", env->exception_index, env->tl, env->maxtl);
3426
        return;
3427
    }
3428
#endif
3429
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl)
            env->tl++;
    }
    tsptr = cpu_tsptr(env);

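    /* Save the pre-trap state: TSTATE packs CCR in bits 39:32, ASI in
       bits 31:24, the (masked) PSTATE in bits 19:8 and CWP in the low
       bits. */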
    tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        GET_CWP64(env);
    tsptr->tpc = env->pc;
    tsptr->tnpc = env->npc;
    tsptr->tt = intno;

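    /* Select the global register set for the handler: interrupt vector
       traps use the interrupt globals, MMU faults and misses use the MMU
       globals, everything else gets the alternate globals. */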
    switch (intno) {
    case TT_IVEC:
        change_pstate(PS_PEF | PS_PRIV | PS_IG);
        break;
    case TT_TFAULT:
    case TT_DFAULT:
    case TT_TMISS ... TT_TMISS + 3:
    case TT_DMISS ... TT_DMISS + 3:
    case TT_DPROT ... TT_DPROT + 3:
        change_pstate(PS_PEF | PS_PRIV | PS_MG);
        break;
    default:
        change_pstate(PS_PEF | PS_PRIV | PS_AG);
        break;
    }

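    /* Clean-window and spill traps rotate CWP backwards, fill traps
       rotate it forwards, so the handler runs in the window it has to
       service. */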
    if (intno == TT_CLRWIN)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
    else if ((intno & 0x1c0) == TT_SPILL)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
    else if ((intno & 0x1c0) == TT_FILL)
        cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
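    /* Vector through the trap table: bit 14 selects the TL>0 half of
       the table and the trap type occupies bits 13:5. */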
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;
}
#else
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_WIN_OVF] = "Window Overflow",
    [TT_WIN_UNF] = "Window Underflow",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_DFAULT] = "Data Access Fault",
    [TT_TOVF] = "Tag Overflow",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_NCP_INSN] = "Coprocessor Disabled",
};
#endif

void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x100)
            name = "Unknown";
        else if (intno >= 0x80)
            name = "Trap Instruction";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->psret == 0) {
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
                  env->exception_index);
        return;
    }
#endif
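    /* SPARC V8 trap entry: disable traps, move to a fresh register
       window, save PC/nPC in the new window's %l1/%l2 (regwptr[9]/[10]),
       remember the previous supervisor bit in PS, enter supervisor mode
       and vector through TBR (each trap table entry is 16 bytes, hence
       tt << 4). */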
    env->psret = 0;
    cwp = cpu_cwp_dec(env, env->cwp - 1);
    cpu_set_cwp(env, cwp);
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;
    env->psrs = 1;
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr);

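/* Instantiate the softmmu load/store helpers for 1, 2, 4 and 8 byte
   accesses; ALIGNED_ONLY routes misaligned accesses through
   do_unaligned_access() declared above. */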
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* XXX: make it generic ? */
static void cpu_restore_state2(void *retaddr)
{
    TranslationBlock *tb;
    unsigned long pc;

    if (retaddr) {
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
        if (tb) {
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
        }
    }
}

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        cpu_restore_state2(retaddr);
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif

#ifndef TARGET_SPARC64
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
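    /* Record the fault in the SRMMU fault status (mmuregs[3]) and fault
       address (mmuregs[4]) registers, then raise an access error unless
       the MMU is disabled or running in no-fault mode. */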
    if (env->mmuregs[3]) /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    if (is_asi)
        env->mmuregs[3] |= 1 << 16;
    if (env->psrs)
        env->mmuregs[3] |= 1 << 5;
    if (is_exec)
        env->mmuregs[3] |= 1 << 6;
    if (is_write)
        env->mmuregs[3] |= 1 << 7;
    env->mmuregs[3] |= (5 << 2) | 2;
    env->mmuregs[4] = addr; /* Fault address register */
    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }
    env = saved_env;
}
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
#endif

    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);

    env = saved_env;
}
#endif

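/* Timer helpers: the tick backends only exist in system emulation; in
   user mode reads return zero and writes are ignored. */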
#ifdef TARGET_SPARC64
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}

uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}

void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
#endif