root / target-sparc / op_helper.c @ 618ba8e6
#include "exec.h"
#include "host-utils.h"
#include "helper.h"
#include "sysemu.h"

//#define DEBUG_MMU
//#define DEBUG_MXCC
//#define DEBUG_UNALIGNED
//#define DEBUG_UNASSIGNED
//#define DEBUG_ASI
//#define DEBUG_PCALL
//#define DEBUG_PSTATE
//#define DEBUG_CACHE_CONTROL

#ifdef DEBUG_MMU
#define DPRINTF_MMU(fmt, ...)                                   \
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MMU(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_MXCC
#define DPRINTF_MXCC(fmt, ...)                                  \
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...)                                   \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#endif

#ifdef DEBUG_PSTATE
#define DPRINTF_PSTATE(fmt, ...)                                   \
    do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_PSTATE(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_CACHE_CONTROL
#define DPRINTF_CACHE_CONTROL(fmt, ...)                                   \
    do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
#define AM_CHECK(env1) (1)
#endif
#endif

#define DT0 (env->dt0)
#define DT1 (env->dt1)
#define QT0 (env->qt0)
#define QT1 (env->qt1)

/* Leon3 cache control */

/* Cache control: emulate the behavior of cache control registers but without
   any effect on the emulated CPU */

#define CACHE_STATE_MASK 0x3
#define CACHE_DISABLED   0x0
#define CACHE_FROZEN     0x1
#define CACHE_ENABLED    0x3

/* Cache Control register fields */

#define CACHE_CTRL_IF (1 <<  4)  /* Instruction Cache Freeze on Interrupt */
#define CACHE_CTRL_DF (1 <<  5)  /* Data Cache Freeze on Interrupt */
#define CACHE_CTRL_DP (1 << 14)  /* Data cache flush pending */
#define CACHE_CTRL_IP (1 << 15)  /* Instruction cache flush pending */
#define CACHE_CTRL_IB (1 << 16)  /* Instruction burst fetch */
#define CACHE_CTRL_FI (1 << 21)  /* Flush Instruction cache (Write only) */
#define CACHE_CTRL_FD (1 << 22)  /* Flush Data cache (Write only) */
#define CACHE_CTRL_DS (1 << 23)  /* Data cache snoop enable */

#if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                          int is_asi, int size);
#endif

#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
// Calculates TSB pointer value for fault page size 8k or 64k
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
    int tsb_size  = tsb_register & 0xf;

    // discard lower 13 bits which hold tag access context
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;

    // now reorder bits
    uint64_t tsb_base_mask = ~0x1fffULL;
    uint64_t va = tag_access_va;

    // move va bits to correct position
    if (page_size == 8*1024) {
        va >>= 9;
    } else if (page_size == 64*1024) {
        va >>= 12;
    }

    if (tsb_size) {
        tsb_base_mask <<= tsb_size;
    }

    // calculate tsb_base mask and adjust va if split is in use
    if (tsb_split) {
        if (page_size == 8*1024) {
            va &= ~(1ULL << (13 + tsb_size));
        } else if (page_size == 64*1024) {
            va |= (1ULL << (13 + tsb_size));
        }
        tsb_base_mask <<= 1;
    }

    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}

// Calculates tag target register value by reordering bits
// in tag access register
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
}
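
/*
 * Informally: with tsb_size == 0 and no split, the 8k case above reduces to
 *     (tsb_base & ~0x1fffULL) | ((va >> 9) & 0x1ff0)
 * so VA bits [21:13] select one of 512 16-byte TTE pointers in the base TSB
 * page, while the 64k case shifts by 12 and uses VA bits [24:16] instead.
 */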

static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUState *env1)
{
    target_ulong mask, size, va, offset;

    // flush page range if translation is valid
    if (TTE_IS_VALID(tlb->tte)) {

        mask = 0xffffffffffffe000ULL;
        mask <<= 3 * ((tlb->tte >> 61) & 3);
        size = ~mask + 1;

        va = tlb->tag & mask;

        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
            tlb_flush_page(env1, va + offset);
        }
    }

    tlb->tag = tlb_tag;
    tlb->tte = tlb_tte;
}

static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char* strmmu, CPUState *env1)
{
    unsigned int i;
    target_ulong mask;
    uint64_t context;

    int is_demap_context = (demap_addr >> 6) & 1;

    // demap context
    switch ((demap_addr >> 4) & 3) {
    case 0: // primary
        context = env1->dmmu.mmu_primary_context;
        break;
    case 1: // secondary
        context = env1->dmmu.mmu_secondary_context;
        break;
    case 2: // nucleus
        context = 0;
        break;
    case 3: // reserved
    default:
        return;
    }

    for (i = 0; i < 64; i++) {
        if (TTE_IS_VALID(tlb[i].tte)) {

            if (is_demap_context) {
                // will remove non-global entries matching context value
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            } else {
                // demap page
                // will remove any entry matching VA
                mask = 0xffffffffffffe000ULL;
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);

                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
                    continue;
                }

                // entry should be global or matching context value
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            }

            replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
        }
    }
}

static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char* strmmu, CPUState *env1)
{
    unsigned int i, replace_used;

    // Try replacing invalid entry
    for (i = 0; i < 64; i++) {
        if (!TTE_IS_VALID(tlb[i].tte)) {
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
            return;
        }
    }

    // All entries are valid, try replacing unlocked entry

    for (replace_used = 0; replace_used < 2; ++replace_used) {

        // Used entries are not replaced on first pass

        for (i = 0; i < 64; i++) {
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {

                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
                            strmmu, (replace_used?"used":"unused"), i);
                dump_mmu(stdout, fprintf, env1);
#endif
                return;
            }
        }

        // Now reset used bit and search for unused entries again

        for (i = 0; i < 64; i++) {
            TTE_SET_UNUSED(tlb[i].tte);
        }
    }

#ifdef DEBUG_MMU
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
#endif
    // error state?
}

#endif

static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(env1))
        addr &= 0xffffffffULL;
#endif
    return addr;
}

/* returns true if an access using this ASI is to have its address translated
   by the MMU, otherwise the access goes to the raw physical address */
static inline int is_translating_asi(int asi)
{
#ifdef TARGET_SPARC64
    /* Ultrasparc IIi translating asi
       - note this list is defined by cpu implementation
     */
    switch (asi) {
    case 0x04 ... 0x11:
    case 0x18 ... 0x19:
    case 0x24 ... 0x2C:
    case 0x70 ... 0x73:
    case 0x78 ... 0x79:
    case 0x80 ... 0xFF:
        return 1;

    default:
        return 0;
    }
#else
    /* TODO: check sparc32 bits */
    return 0;
#endif
}

static inline target_ulong asi_address_mask(CPUState *env1,
                                            int asi, target_ulong addr)
{
    if (is_translating_asi(asi)) {
        return address_mask(env, addr);
    } else {
        return addr;
    }
}

static void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit();
}

void HELPER(raise_exception)(int tt)
{
    raise_exception(tt);
}

void helper_shutdown(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_system_shutdown_request();
#endif
}

void helper_check_align(target_ulong addr, uint32_t align)
{
    if (addr & align) {
#ifdef DEBUG_UNALIGNED
        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
               "\n", addr, env->pc);
#endif
        raise_exception(TT_UNALIGNED);
    }
}
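
/* Note: 'align' is a mask of low address bits that must be clear; e.g.
   helper_ld_asi() below calls helper_check_align(addr, size - 1), so any
   misaligned access of that size raises TT_UNALIGNED. */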

#define F_HELPER(name, p) void helper_f##name##p(void)

#define F_BINOP(name)                                           \
    float32 helper_f ## name ## s (float32 src1, float32 src2)  \
    {                                                           \
        return float32_ ## name (src1, src2, &env->fp_status);  \
    }                                                           \
    F_HELPER(name, d)                                           \
    {                                                           \
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);     \
    }                                                           \
    F_HELPER(name, q)                                           \
    {                                                           \
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);    \
    }

F_BINOP(add);
F_BINOP(sub);
F_BINOP(mul);
F_BINOP(div);
#undef F_BINOP

void helper_fsmuld(float32 src1, float32 src2)
{
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
                      float32_to_float64(src2, &env->fp_status),
                      &env->fp_status);
}

void helper_fdmulq(void)
{
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
                       float64_to_float128(DT1, &env->fp_status),
                       &env->fp_status);
}

float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
F_HELPER(neg, d)
{
    DT0 = float64_chs(DT1);
}

F_HELPER(neg, q)
{
    QT0 = float128_chs(QT1);
}
#endif

/* Integer to float conversion.  */
float32 helper_fitos(int32_t src)
{
    return int32_to_float32(src, &env->fp_status);
}

void helper_fitod(int32_t src)
{
    DT0 = int32_to_float64(src, &env->fp_status);
}

void helper_fitoq(int32_t src)
{
    QT0 = int32_to_float128(src, &env->fp_status);
}

#ifdef TARGET_SPARC64
float32 helper_fxtos(void)
{
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, d)
{
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, q)
{
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
#undef F_HELPER

/* floating point conversion */
float32 helper_fdtos(void)
{
    return float64_to_float32(DT1, &env->fp_status);
}

void helper_fstod(float32 src)
{
    DT0 = float32_to_float64(src, &env->fp_status);
}

float32 helper_fqtos(void)
{
    return float128_to_float32(QT1, &env->fp_status);
}

void helper_fstoq(float32 src)
{
    QT0 = float32_to_float128(src, &env->fp_status);
}

void helper_fqtod(void)
{
    DT0 = float128_to_float64(QT1, &env->fp_status);
}

void helper_fdtoq(void)
{
    QT0 = float64_to_float128(DT1, &env->fp_status);
}

/* Float to integer conversion.  */
int32_t helper_fstoi(float32 src)
{
    return float32_to_int32_round_to_zero(src, &env->fp_status);
}

int32_t helper_fdtoi(void)
{
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
}

int32_t helper_fqtoi(void)
{
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
}

#ifdef TARGET_SPARC64
void helper_fstox(float32 src)
{
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
}

void helper_fdtox(void)
{
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}

void helper_fqtox(void)
{
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}

void helper_faligndata(void)
{
    uint64_t tmp;

    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
    /* on many architectures a shift of 64 does nothing */
    if ((env->gsr & 7) != 0) {
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
    }
    *((uint64_t *)&DT0) = tmp;
}

#ifdef HOST_WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif
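
/* The VIS_* accessors above index sub-word lanes of the vis64/vis32 unions
   defined below by their little-endian position, independent of host byte
   order, so lane 0 is always the least significant byte/word/longword. */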

typedef union {
    uint8_t b[8];
    uint16_t w[4];
    int16_t sw[4];
    uint32_t l[2];
    float64 d;
} vis64;

typedef union {
    uint8_t b[4];
    uint16_t w[2];
    uint32_t l;
    float32 f;
} vis32;

void helper_fpmerge(void)
{
    vis64 s, d;

    s.d = DT0;
    d.d = DT1;

    // Reverse calculation order to handle overlap
    d.VIS_B64(7) = s.VIS_B64(3);
    d.VIS_B64(6) = d.VIS_B64(3);
    d.VIS_B64(5) = s.VIS_B64(2);
    d.VIS_B64(4) = d.VIS_B64(2);
    d.VIS_B64(3) = s.VIS_B64(1);
    d.VIS_B64(2) = d.VIS_B64(1);
    d.VIS_B64(1) = s.VIS_B64(0);
    //d.VIS_B64(0) = d.VIS_B64(0);

    DT0 = d.d;
}

558
void helper_fmul8x16(void)
559
{
560
    vis64 s, d;
561
    uint32_t tmp;
562

    
563
    s.d = DT0;
564
    d.d = DT1;
565

    
566
#define PMUL(r)                                                 \
567
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
568
    if ((tmp & 0xff) > 0x7f)                                    \
569
        tmp += 0x100;                                           \
570
    d.VIS_W64(r) = tmp >> 8;
571

    
572
    PMUL(0);
573
    PMUL(1);
574
    PMUL(2);
575
    PMUL(3);
576
#undef PMUL
577

    
578
    DT0 = d.d;
579
}
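
/* In the fmul8x16 family above and below, each PMUL stores the 8x16 product
   shifted right by 8 into a 16-bit lane; the "(tmp & 0xff) > 0x7f" test
   rounds the discarded low byte to nearest before that shift. */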
580

    
581
void helper_fmul8x16al(void)
582
{
583
    vis64 s, d;
584
    uint32_t tmp;
585

    
586
    s.d = DT0;
587
    d.d = DT1;
588

    
589
#define PMUL(r)                                                 \
590
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
591
    if ((tmp & 0xff) > 0x7f)                                    \
592
        tmp += 0x100;                                           \
593
    d.VIS_W64(r) = tmp >> 8;
594

    
595
    PMUL(0);
596
    PMUL(1);
597
    PMUL(2);
598
    PMUL(3);
599
#undef PMUL
600

    
601
    DT0 = d.d;
602
}
603

    
604
void helper_fmul8x16au(void)
605
{
606
    vis64 s, d;
607
    uint32_t tmp;
608

    
609
    s.d = DT0;
610
    d.d = DT1;
611

    
612
#define PMUL(r)                                                 \
613
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
614
    if ((tmp & 0xff) > 0x7f)                                    \
615
        tmp += 0x100;                                           \
616
    d.VIS_W64(r) = tmp >> 8;
617

    
618
    PMUL(0);
619
    PMUL(1);
620
    PMUL(2);
621
    PMUL(3);
622
#undef PMUL
623

    
624
    DT0 = d.d;
625
}
626

    
627
void helper_fmul8sux16(void)
628
{
629
    vis64 s, d;
630
    uint32_t tmp;
631

    
632
    s.d = DT0;
633
    d.d = DT1;
634

    
635
#define PMUL(r)                                                         \
636
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
637
    if ((tmp & 0xff) > 0x7f)                                            \
638
        tmp += 0x100;                                                   \
639
    d.VIS_W64(r) = tmp >> 8;
640

    
641
    PMUL(0);
642
    PMUL(1);
643
    PMUL(2);
644
    PMUL(3);
645
#undef PMUL
646

    
647
    DT0 = d.d;
648
}
649

    
650
void helper_fmul8ulx16(void)
651
{
652
    vis64 s, d;
653
    uint32_t tmp;
654

    
655
    s.d = DT0;
656
    d.d = DT1;
657

    
658
#define PMUL(r)                                                         \
659
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
660
    if ((tmp & 0xff) > 0x7f)                                            \
661
        tmp += 0x100;                                                   \
662
    d.VIS_W64(r) = tmp >> 8;
663

    
664
    PMUL(0);
665
    PMUL(1);
666
    PMUL(2);
667
    PMUL(3);
668
#undef PMUL
669

    
670
    DT0 = d.d;
671
}
672

    
673
void helper_fmuld8sux16(void)
674
{
675
    vis64 s, d;
676
    uint32_t tmp;
677

    
678
    s.d = DT0;
679
    d.d = DT1;
680

    
681
#define PMUL(r)                                                         \
682
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
683
    if ((tmp & 0xff) > 0x7f)                                            \
684
        tmp += 0x100;                                                   \
685
    d.VIS_L64(r) = tmp;
686

    
687
    // Reverse calculation order to handle overlap
688
    PMUL(1);
689
    PMUL(0);
690
#undef PMUL
691

    
692
    DT0 = d.d;
693
}
694

    
695
void helper_fmuld8ulx16(void)
696
{
697
    vis64 s, d;
698
    uint32_t tmp;
699

    
700
    s.d = DT0;
701
    d.d = DT1;
702

    
703
#define PMUL(r)                                                         \
704
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
705
    if ((tmp & 0xff) > 0x7f)                                            \
706
        tmp += 0x100;                                                   \
707
    d.VIS_L64(r) = tmp;
708

    
709
    // Reverse calculation order to handle overlap
710
    PMUL(1);
711
    PMUL(0);
712
#undef PMUL
713

    
714
    DT0 = d.d;
715
}
716

    
717
void helper_fexpand(void)
718
{
719
    vis32 s;
720
    vis64 d;
721

    
722
    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
723
    d.d = DT1;
724
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
725
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
726
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
727
    d.VIS_W64(3) = s.VIS_B32(3) << 4;
728

    
729
    DT0 = d.d;
730
}
731

    
732
#define VIS_HELPER(name, F)                             \
733
    void name##16(void)                                 \
734
    {                                                   \
735
        vis64 s, d;                                     \
736
                                                        \
737
        s.d = DT0;                                      \
738
        d.d = DT1;                                      \
739
                                                        \
740
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));   \
741
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));   \
742
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));   \
743
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));   \
744
                                                        \
745
        DT0 = d.d;                                      \
746
    }                                                   \
747
                                                        \
748
    uint32_t name##16s(uint32_t src1, uint32_t src2)    \
749
    {                                                   \
750
        vis32 s, d;                                     \
751
                                                        \
752
        s.l = src1;                                     \
753
        d.l = src2;                                     \
754
                                                        \
755
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));   \
756
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));   \
757
                                                        \
758
        return d.l;                                     \
759
    }                                                   \
760
                                                        \
761
    void name##32(void)                                 \
762
    {                                                   \
763
        vis64 s, d;                                     \
764
                                                        \
765
        s.d = DT0;                                      \
766
        d.d = DT1;                                      \
767
                                                        \
768
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));   \
769
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));   \
770
                                                        \
771
        DT0 = d.d;                                      \
772
    }                                                   \
773
                                                        \
774
    uint32_t name##32s(uint32_t src1, uint32_t src2)    \
775
    {                                                   \
776
        vis32 s, d;                                     \
777
                                                        \
778
        s.l = src1;                                     \
779
        d.l = src2;                                     \
780
                                                        \
781
        d.l = F(d.l, s.l);                              \
782
                                                        \
783
        return d.l;                                     \
784
    }
785

    
786
#define FADD(a, b) ((a) + (b))
787
#define FSUB(a, b) ((a) - (b))
788
VIS_HELPER(helper_fpadd, FADD)
789
VIS_HELPER(helper_fpsub, FSUB)
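
/* Each VIS_HELPER() invocation above expands into four helpers, e.g.
   helper_fpadd16, helper_fpadd16s, helper_fpadd32 and helper_fpadd32s:
   the 64-bit forms operate on DT0/DT1, the "s" forms on explicit 32-bit
   arguments. */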
790

    
791
#define VIS_CMPHELPER(name, F)                                        \
792
    void name##16(void)                                           \
793
    {                                                             \
794
        vis64 s, d;                                               \
795
                                                                  \
796
        s.d = DT0;                                                \
797
        d.d = DT1;                                                \
798
                                                                  \
799
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0;       \
800
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0;      \
801
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0;      \
802
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0;      \
803
                                                                  \
804
        DT0 = d.d;                                                \
805
    }                                                             \
806
                                                                  \
807
    void name##32(void)                                           \
808
    {                                                             \
809
        vis64 s, d;                                               \
810
                                                                  \
811
        s.d = DT0;                                                \
812
        d.d = DT1;                                                \
813
                                                                  \
814
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0;       \
815
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0;      \
816
                                                                  \
817
        DT0 = d.d;                                                \
818
    }
819

    
820
#define FCMPGT(a, b) ((a) > (b))
821
#define FCMPEQ(a, b) ((a) == (b))
822
#define FCMPLE(a, b) ((a) <= (b))
823
#define FCMPNE(a, b) ((a) != (b))
824

    
825
VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
826
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
827
VIS_CMPHELPER(helper_fcmple, FCMPLE)
828
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
829
#endif
830

    
831
void helper_check_ieee_exceptions(void)
832
{
833
    target_ulong status;
834

    
835
    status = get_float_exception_flags(&env->fp_status);
836
    if (status) {
837
        /* Copy IEEE 754 flags into FSR */
838
        if (status & float_flag_invalid)
839
            env->fsr |= FSR_NVC;
840
        if (status & float_flag_overflow)
841
            env->fsr |= FSR_OFC;
842
        if (status & float_flag_underflow)
843
            env->fsr |= FSR_UFC;
844
        if (status & float_flag_divbyzero)
845
            env->fsr |= FSR_DZC;
846
        if (status & float_flag_inexact)
847
            env->fsr |= FSR_NXC;
848

    
849
        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
850
            /* Unmasked exception, generate a trap */
851
            env->fsr |= FSR_FTT_IEEE_EXCP;
852
            raise_exception(TT_FP_EXCP);
853
        } else {
854
            /* Accumulate exceptions */
855
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
856
        }
857
    }
858
}
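
/* The current-exception bits (FSR_CEXC_MASK) and the trap enable bits
   (FSR_TEM_MASK) sit 23 bits apart in the FSR, so the shift by 23 above
   lines them up: an enabled, pending exception traps with TT_FP_EXCP,
   otherwise the cexc bits are folded into the accrued-exception field via
   the shift by 5. */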
859

    
860
void helper_clear_float_exceptions(void)
861
{
862
    set_float_exception_flags(0, &env->fp_status);
863
}
864

    
865
float32 helper_fabss(float32 src)
866
{
867
    return float32_abs(src);
868
}
869

    
870
#ifdef TARGET_SPARC64
871
void helper_fabsd(void)
872
{
873
    DT0 = float64_abs(DT1);
874
}
875

    
876
void helper_fabsq(void)
877
{
878
    QT0 = float128_abs(QT1);
879
}
880
#endif
881

    
882
float32 helper_fsqrts(float32 src)
883
{
884
    return float32_sqrt(src, &env->fp_status);
885
}
886

    
887
void helper_fsqrtd(void)
888
{
889
    DT0 = float64_sqrt(DT1, &env->fp_status);
890
}
891

    
892
void helper_fsqrtq(void)
893
{
894
    QT0 = float128_sqrt(QT1, &env->fp_status);
895
}
896

    
897
#define GEN_FCMP(name, size, reg1, reg2, FS, E)                         \
898
    void glue(helper_, name) (void)                                     \
899
    {                                                                   \
900
        env->fsr &= FSR_FTT_NMASK;                                      \
901
        if (E && (glue(size, _is_any_nan)(reg1) ||                      \
902
                     glue(size, _is_any_nan)(reg2)) &&                  \
903
            (env->fsr & FSR_NVM)) {                                     \
904
            env->fsr |= FSR_NVC;                                        \
905
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
906
            raise_exception(TT_FP_EXCP);                                \
907
        }                                                               \
908
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) {   \
909
        case float_relation_unordered:                                  \
910
            if ((env->fsr & FSR_NVM)) {                                 \
911
                env->fsr |= FSR_NVC;                                    \
912
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
913
                raise_exception(TT_FP_EXCP);                            \
914
            } else {                                                    \
915
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
916
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
917
                env->fsr |= FSR_NVA;                                    \
918
            }                                                           \
919
            break;                                                      \
920
        case float_relation_less:                                       \
921
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
922
            env->fsr |= FSR_FCC0 << FS;                                 \
923
            break;                                                      \
924
        case float_relation_greater:                                    \
925
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
926
            env->fsr |= FSR_FCC1 << FS;                                 \
927
            break;                                                      \
928
        default:                                                        \
929
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
930
            break;                                                      \
931
        }                                                               \
932
    }
933
#define GEN_FCMPS(name, size, FS, E)                                    \
934
    void glue(helper_, name)(float32 src1, float32 src2)                \
935
    {                                                                   \
936
        env->fsr &= FSR_FTT_NMASK;                                      \
937
        if (E && (glue(size, _is_any_nan)(src1) ||                      \
938
                     glue(size, _is_any_nan)(src2)) &&                  \
939
            (env->fsr & FSR_NVM)) {                                     \
940
            env->fsr |= FSR_NVC;                                        \
941
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
942
            raise_exception(TT_FP_EXCP);                                \
943
        }                                                               \
944
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) {   \
945
        case float_relation_unordered:                                  \
946
            if ((env->fsr & FSR_NVM)) {                                 \
947
                env->fsr |= FSR_NVC;                                    \
948
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
949
                raise_exception(TT_FP_EXCP);                            \
950
            } else {                                                    \
951
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
952
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
953
                env->fsr |= FSR_NVA;                                    \
954
            }                                                           \
955
            break;                                                      \
956
        case float_relation_less:                                       \
957
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
958
            env->fsr |= FSR_FCC0 << FS;                                 \
959
            break;                                                      \
960
        case float_relation_greater:                                    \
961
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
962
            env->fsr |= FSR_FCC1 << FS;                                 \
963
            break;                                                      \
964
        default:                                                        \
965
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
966
            break;                                                      \
967
        }                                                               \
968
    }
969

    
970
GEN_FCMPS(fcmps, float32, 0, 0);
971
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
972

    
973
GEN_FCMPS(fcmpes, float32, 0, 1);
974
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
975

    
976
GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
977
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
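
/* In these GEN_FCMP/GEN_FCMPS instantiations, FS is the left shift applied
   to FSR_FCC0/FSR_FCC1 and so selects which fcc field of the FSR receives
   the result (0 here for fcc0; the fcc1-fcc3 variants further down use 22,
   24 and 26), while E selects the signalling compares that trap on NaN
   operands when the invalid-operation trap (FSR_NVM) is enabled. */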
978

    
979
static uint32_t compute_all_flags(void)
980
{
981
    return env->psr & PSR_ICC;
982
}
983

    
984
static uint32_t compute_C_flags(void)
985
{
986
    return env->psr & PSR_CARRY;
987
}
988

    
989
static inline uint32_t get_NZ_icc(int32_t dst)
990
{
991
    uint32_t ret = 0;
992

    
993
    if (dst == 0) {
994
        ret = PSR_ZERO;
995
    } else if (dst < 0) {
996
        ret = PSR_NEG;
997
    }
998
    return ret;
999
}
1000

    
1001
#ifdef TARGET_SPARC64
1002
static uint32_t compute_all_flags_xcc(void)
1003
{
1004
    return env->xcc & PSR_ICC;
1005
}
1006

    
1007
static uint32_t compute_C_flags_xcc(void)
1008
{
1009
    return env->xcc & PSR_CARRY;
1010
}
1011

    
1012
static inline uint32_t get_NZ_xcc(target_long dst)
1013
{
1014
    uint32_t ret = 0;
1015

    
1016
    if (!dst) {
1017
        ret = PSR_ZERO;
1018
    } else if (dst < 0) {
1019
        ret = PSR_NEG;
1020
    }
1021
    return ret;
1022
}
1023
#endif
1024

    
1025
static inline uint32_t get_V_div_icc(target_ulong src2)
1026
{
1027
    uint32_t ret = 0;
1028

    
1029
    if (src2 != 0) {
1030
        ret = PSR_OVF;
1031
    }
1032
    return ret;
1033
}
1034

    
1035
static uint32_t compute_all_div(void)
1036
{
1037
    uint32_t ret;
1038

    
1039
    ret = get_NZ_icc(CC_DST);
1040
    ret |= get_V_div_icc(CC_SRC2);
1041
    return ret;
1042
}
1043

    
1044
static uint32_t compute_C_div(void)
1045
{
1046
    return 0;
1047
}
1048

    
1049
static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
1050
{
1051
    uint32_t ret = 0;
1052

    
1053
    if (dst < src1) {
1054
        ret = PSR_CARRY;
1055
    }
1056
    return ret;
1057
}
1058

    
1059
static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
1060
                                      uint32_t src2)
1061
{
1062
    uint32_t ret = 0;
1063

    
1064
    if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
1065
        ret = PSR_CARRY;
1066
    }
1067
    return ret;
1068
}
1069

    
1070
static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
1071
                                     uint32_t src2)
1072
{
1073
    uint32_t ret = 0;
1074

    
1075
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
1076
        ret = PSR_OVF;
1077
    }
1078
    return ret;
1079
}
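
/* The two formulas above are the usual bitwise identities for addition:
   carry out of bit 31 is (src1 & src2) | (~dst & (src1 | src2)), and signed
   overflow occurs when both operands have the same sign but the result's
   sign differs, i.e. ((src1 ^ src2 ^ -1) & (src1 ^ dst)). */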
1080

    
1081
#ifdef TARGET_SPARC64
1082
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
1083
{
1084
    uint32_t ret = 0;
1085

    
1086
    if (dst < src1) {
1087
        ret = PSR_CARRY;
1088
    }
1089
    return ret;
1090
}
1091

    
1092
static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
1093
                                      target_ulong src2)
1094
{
1095
    uint32_t ret = 0;
1096

    
1097
    if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
1098
        ret = PSR_CARRY;
1099
    }
1100
    return ret;
1101
}
1102

    
1103
static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
1104
                                         target_ulong src2)
1105
{
1106
    uint32_t ret = 0;
1107

    
1108
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
1109
        ret = PSR_OVF;
1110
    }
1111
    return ret;
1112
}
1113

    
1114
static uint32_t compute_all_add_xcc(void)
1115
{
1116
    uint32_t ret;
1117

    
1118
    ret = get_NZ_xcc(CC_DST);
1119
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
1120
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
1121
    return ret;
1122
}
1123

    
1124
static uint32_t compute_C_add_xcc(void)
1125
{
1126
    return get_C_add_xcc(CC_DST, CC_SRC);
1127
}
1128
#endif
1129

    
1130
static uint32_t compute_all_add(void)
1131
{
1132
    uint32_t ret;
1133

    
1134
    ret = get_NZ_icc(CC_DST);
1135
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1136
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1137
    return ret;
1138
}
1139

    
1140
static uint32_t compute_C_add(void)
1141
{
1142
    return get_C_add_icc(CC_DST, CC_SRC);
1143
}
1144

    
1145
#ifdef TARGET_SPARC64
1146
static uint32_t compute_all_addx_xcc(void)
1147
{
1148
    uint32_t ret;
1149

    
1150
    ret = get_NZ_xcc(CC_DST);
1151
    ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
1152
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
1153
    return ret;
1154
}
1155

    
1156
static uint32_t compute_C_addx_xcc(void)
1157
{
1158
    uint32_t ret;
1159

    
1160
    ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
1161
    return ret;
1162
}
1163
#endif
1164

    
1165
static uint32_t compute_all_addx(void)
1166
{
1167
    uint32_t ret;
1168

    
1169
    ret = get_NZ_icc(CC_DST);
1170
    ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1171
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1172
    return ret;
1173
}
1174

    
1175
static uint32_t compute_C_addx(void)
1176
{
1177
    uint32_t ret;
1178

    
1179
    ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1180
    return ret;
1181
}
1182

    
1183
static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
1184
{
1185
    uint32_t ret = 0;
1186

    
1187
    if ((src1 | src2) & 0x3) {
1188
        ret = PSR_OVF;
1189
    }
1190
    return ret;
1191
}
1192

    
1193
static uint32_t compute_all_tadd(void)
1194
{
1195
    uint32_t ret;
1196

    
1197
    ret = get_NZ_icc(CC_DST);
1198
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1199
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1200
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1201
    return ret;
1202
}
1203

    
1204
static uint32_t compute_all_taddtv(void)
1205
{
1206
    uint32_t ret;
1207

    
1208
    ret = get_NZ_icc(CC_DST);
1209
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1210
    return ret;
1211
}
1212

    
1213
static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
1214
{
1215
    uint32_t ret = 0;
1216

    
1217
    if (src1 < src2) {
1218
        ret = PSR_CARRY;
1219
    }
1220
    return ret;
1221
}
1222

    
1223
static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
1224
                                      uint32_t src2)
1225
{
1226
    uint32_t ret = 0;
1227

    
1228
    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
1229
        ret = PSR_CARRY;
1230
    }
1231
    return ret;
1232
}
1233

    
1234
static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
1235
                                     uint32_t src2)
1236
{
1237
    uint32_t ret = 0;
1238

    
1239
    if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
1240
        ret = PSR_OVF;
1241
    }
1242
    return ret;
1243
}
1244

    
1245

    
1246
#ifdef TARGET_SPARC64
1247
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
1248
{
1249
    uint32_t ret = 0;
1250

    
1251
    if (src1 < src2) {
1252
        ret = PSR_CARRY;
1253
    }
1254
    return ret;
1255
}
1256

    
1257
static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
1258
                                      target_ulong src2)
1259
{
1260
    uint32_t ret = 0;
1261

    
1262
    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
1263
        ret = PSR_CARRY;
1264
    }
1265
    return ret;
1266
}
1267

    
1268
static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
1269
                                     target_ulong src2)
1270
{
1271
    uint32_t ret = 0;
1272

    
1273
    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
1274
        ret = PSR_OVF;
1275
    }
1276
    return ret;
1277
}
1278

    
1279
static uint32_t compute_all_sub_xcc(void)
1280
{
1281
    uint32_t ret;
1282

    
1283
    ret = get_NZ_xcc(CC_DST);
1284
    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
1285
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1286
    return ret;
1287
}
1288

    
1289
static uint32_t compute_C_sub_xcc(void)
1290
{
1291
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
1292
}
1293
#endif
1294

    
1295
static uint32_t compute_all_sub(void)
1296
{
1297
    uint32_t ret;
1298

    
1299
    ret = get_NZ_icc(CC_DST);
1300
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1301
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1302
    return ret;
1303
}
1304

    
1305
static uint32_t compute_C_sub(void)
1306
{
1307
    return get_C_sub_icc(CC_SRC, CC_SRC2);
1308
}
1309

    
1310
#ifdef TARGET_SPARC64
1311
static uint32_t compute_all_subx_xcc(void)
1312
{
1313
    uint32_t ret;
1314

    
1315
    ret = get_NZ_xcc(CC_DST);
1316
    ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
1317
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1318
    return ret;
1319
}
1320

    
1321
static uint32_t compute_C_subx_xcc(void)
1322
{
1323
    uint32_t ret;
1324

    
1325
    ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
1326
    return ret;
1327
}
1328
#endif
1329

    
1330
static uint32_t compute_all_subx(void)
1331
{
1332
    uint32_t ret;
1333

    
1334
    ret = get_NZ_icc(CC_DST);
1335
    ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1336
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1337
    return ret;
1338
}
1339

    
1340
static uint32_t compute_C_subx(void)
1341
{
1342
    uint32_t ret;
1343

    
1344
    ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1345
    return ret;
1346
}
1347

    
1348
static uint32_t compute_all_tsub(void)
1349
{
1350
    uint32_t ret;
1351

    
1352
    ret = get_NZ_icc(CC_DST);
1353
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1354
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1355
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1356
    return ret;
1357
}
1358

    
1359
static uint32_t compute_all_tsubtv(void)
1360
{
1361
    uint32_t ret;
1362

    
1363
    ret = get_NZ_icc(CC_DST);
1364
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1365
    return ret;
1366
}
1367

    
1368
static uint32_t compute_all_logic(void)
1369
{
1370
    return get_NZ_icc(CC_DST);
1371
}
1372

    
1373
static uint32_t compute_C_logic(void)
1374
{
1375
    return 0;
1376
}
1377

    
1378
#ifdef TARGET_SPARC64
1379
static uint32_t compute_all_logic_xcc(void)
1380
{
1381
    return get_NZ_xcc(CC_DST);
1382
}
1383
#endif
1384

    
1385
typedef struct CCTable {
1386
    uint32_t (*compute_all)(void); /* return all the flags */
1387
    uint32_t (*compute_c)(void);  /* return the C flag */
1388
} CCTable;
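
/* Condition codes are evaluated lazily: CC_OP records which operation last
   set them and CC_DST/CC_SRC/CC_SRC2 hold its operands, so the tables below
   only recompute PSR.icc (and xcc on sparc64) when a consumer such as
   helper_compute_psr() or helper_compute_C_icc() actually needs them. */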
1389

    
1390
static const CCTable icc_table[CC_OP_NB] = {
1391
    /* CC_OP_DYNAMIC should never happen */
1392
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
1393
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
1394
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
1395
    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
1396
    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
1397
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
1398
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
1399
    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
1400
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
1401
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
1402
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
1403
};
1404

    
1405
#ifdef TARGET_SPARC64
1406
static const CCTable xcc_table[CC_OP_NB] = {
1407
    /* CC_OP_DYNAMIC should never happen */
1408
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
1409
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
1410
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
1411
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
1412
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
1413
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
1414
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1415
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
1416
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1417
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
1418
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
1419
};
1420
#endif
1421

    
1422
void helper_compute_psr(void)
1423
{
1424
    uint32_t new_psr;
1425

    
1426
    new_psr = icc_table[CC_OP].compute_all();
1427
    env->psr = new_psr;
1428
#ifdef TARGET_SPARC64
1429
    new_psr = xcc_table[CC_OP].compute_all();
1430
    env->xcc = new_psr;
1431
#endif
1432
    CC_OP = CC_OP_FLAGS;
1433
}
1434

    
1435
uint32_t helper_compute_C_icc(void)
1436
{
1437
    uint32_t ret;
1438

    
1439
    ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
1440
    return ret;
1441
}
1442

    
1443
static inline void memcpy32(target_ulong *dst, const target_ulong *src)
1444
{
1445
    dst[0] = src[0];
1446
    dst[1] = src[1];
1447
    dst[2] = src[2];
1448
    dst[3] = src[3];
1449
    dst[4] = src[4];
1450
    dst[5] = src[5];
1451
    dst[6] = src[6];
1452
    dst[7] = src[7];
1453
}
1454

    
1455
static void set_cwp(int new_cwp)
1456
{
1457
    /* put the modified wrap registers at their proper location */
1458
    if (env->cwp == env->nwindows - 1) {
1459
        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
1460
    }
1461
    env->cwp = new_cwp;
1462

    
1463
    /* put the wrap registers at their temporary location */
1464
    if (new_cwp == env->nwindows - 1) {
1465
        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
1466
    }
1467
    env->regwptr = env->regbase + (new_cwp * 16);
1468
}
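
/* Window wrap handling: the registers that the highest window shares with
   window 0 are mirrored at the end of regbase, so regwptr can always index
   a window's registers linearly; set_cwp() syncs that mirror whenever the
   old or new window is the wrapping one (nwindows - 1). */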
1469

    
1470
void cpu_set_cwp(CPUState *env1, int new_cwp)
1471
{
1472
    CPUState *saved_env;
1473

    
1474
    saved_env = env;
1475
    env = env1;
1476
    set_cwp(new_cwp);
1477
    env = saved_env;
1478
}
1479

    
1480
static target_ulong get_psr(void)
1481
{
1482
    helper_compute_psr();
1483

    
1484
#if !defined (TARGET_SPARC64)
1485
    return env->version | (env->psr & PSR_ICC) |
1486
        (env->psref? PSR_EF : 0) |
1487
        (env->psrpil << 8) |
1488
        (env->psrs? PSR_S : 0) |
1489
        (env->psrps? PSR_PS : 0) |
1490
        (env->psret? PSR_ET : 0) | env->cwp;
1491
#else
1492
    return env->psr & PSR_ICC;
1493
#endif
1494
}
1495

    
1496
target_ulong cpu_get_psr(CPUState *env1)
1497
{
1498
    CPUState *saved_env;
1499
    target_ulong ret;
1500

    
1501
    saved_env = env;
1502
    env = env1;
1503
    ret = get_psr();
1504
    env = saved_env;
1505
    return ret;
1506
}
1507

    
1508
static void put_psr(target_ulong val)
1509
{
1510
    env->psr = val & PSR_ICC;
1511
#if !defined (TARGET_SPARC64)
1512
    env->psref = (val & PSR_EF)? 1 : 0;
1513
    env->psrpil = (val & PSR_PIL) >> 8;
1514
#endif
1515
#if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
1516
    cpu_check_irqs(env);
1517
#endif
1518
#if !defined (TARGET_SPARC64)
1519
    env->psrs = (val & PSR_S)? 1 : 0;
1520
    env->psrps = (val & PSR_PS)? 1 : 0;
1521
    env->psret = (val & PSR_ET)? 1 : 0;
1522
    set_cwp(val & PSR_CWP);
1523
#endif
1524
    env->cc_op = CC_OP_FLAGS;
1525
}
1526

    
1527
void cpu_put_psr(CPUState *env1, target_ulong val)
1528
{
1529
    CPUState *saved_env;
1530

    
1531
    saved_env = env;
1532
    env = env1;
1533
    put_psr(val);
1534
    env = saved_env;
1535
}
1536

    
1537
static int cwp_inc(int cwp)
1538
{
1539
    if (unlikely(cwp >= env->nwindows)) {
1540
        cwp -= env->nwindows;
1541
    }
1542
    return cwp;
1543
}
1544

    
1545
int cpu_cwp_inc(CPUState *env1, int cwp)
1546
{
1547
    CPUState *saved_env;
1548
    target_ulong ret;
1549

    
1550
    saved_env = env;
1551
    env = env1;
1552
    ret = cwp_inc(cwp);
1553
    env = saved_env;
1554
    return ret;
1555
}
1556

    
1557
static int cwp_dec(int cwp)
1558
{
1559
    if (unlikely(cwp < 0)) {
1560
        cwp += env->nwindows;
1561
    }
1562
    return cwp;
1563
}
1564

    
1565
int cpu_cwp_dec(CPUState *env1, int cwp)
1566
{
1567
    CPUState *saved_env;
1568
    target_ulong ret;
1569

    
1570
    saved_env = env;
1571
    env = env1;
1572
    ret = cwp_dec(cwp);
1573
    env = saved_env;
1574
    return ret;
1575
}
1576

    
1577
#ifdef TARGET_SPARC64
1578
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
1579
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
1580
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
1581

    
1582
GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
1583
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
1584
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
1585

    
1586
GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
1587
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
1588
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
1589

    
1590
GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
1591
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
1592
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
1593

    
1594
GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
1595
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
1596
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
1597

    
1598
GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
1599
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
1600
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
1601
#endif
1602
#undef GEN_FCMPS
1603

    
1604
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
1605
    defined(DEBUG_MXCC)
1606
static void dump_mxcc(CPUState *env)
1607
{
1608
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1609
           "\n",
1610
           env->mxccdata[0], env->mxccdata[1],
1611
           env->mxccdata[2], env->mxccdata[3]);
1612
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1613
           "\n"
1614
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1615
           "\n",
1616
           env->mxccregs[0], env->mxccregs[1],
1617
           env->mxccregs[2], env->mxccregs[3],
1618
           env->mxccregs[4], env->mxccregs[5],
1619
           env->mxccregs[6], env->mxccregs[7]);
1620
}
1621
#endif
1622

    
1623
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
1624
    && defined(DEBUG_ASI)
1625
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
1626
                     uint64_t r1)
1627
{
1628
    switch (size)
1629
    {
1630
    case 1:
1631
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
1632
                    addr, asi, r1 & 0xff);
1633
        break;
1634
    case 2:
1635
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
1636
                    addr, asi, r1 & 0xffff);
1637
        break;
1638
    case 4:
1639
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
1640
                    addr, asi, r1 & 0xffffffff);
1641
        break;
1642
    case 8:
1643
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
1644
                    addr, asi, r1);
1645
        break;
1646
    }
1647
}
1648
#endif
1649

    
1650
#ifndef TARGET_SPARC64
1651
#ifndef CONFIG_USER_ONLY
1652

    
1653

    
1654
/* Leon3 cache control */
1655

    
1656
static void leon3_cache_control_int(void)
1657
{
1658
    uint32_t state = 0;
1659

    
1660
    if (env->cache_control & CACHE_CTRL_IF) {
1661
        /* Instruction cache state */
1662
        state = env->cache_control & CACHE_STATE_MASK;
1663
        if (state == CACHE_ENABLED) {
1664
            state = CACHE_FROZEN;
1665
            DPRINTF_CACHE_CONTROL("Instruction cache: freeze\n");
1666
        }
1667

    
1668
        env->cache_control &= ~CACHE_STATE_MASK;
1669
        env->cache_control |= state;
1670
    }
1671

    
1672
    if (env->cache_control & CACHE_CTRL_DF) {
1673
        /* Data cache state */
1674
        state = (env->cache_control >> 2) & CACHE_STATE_MASK;
1675
        if (state == CACHE_ENABLED) {
1676
            state = CACHE_FROZEN;
1677
            DPRINTF_CACHE_CONTROL("Data cache: freeze\n");
1678
        }
1679

    
1680
        env->cache_control &= ~(CACHE_STATE_MASK << 2);
1681
        env->cache_control |= (state << 2);
1682
    }
1683
}
1684

    
1685
static void leon3_cache_control_st(target_ulong addr, uint64_t val, int size)
1686
{
1687
    DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
1688
                          addr, val, size);
1689

    
1690
    if (size != 4) {
1691
        DPRINTF_CACHE_CONTROL("32bits only\n");
1692
        return;
1693
    }
1694

    
1695
    switch (addr) {
1696
    case 0x00:              /* Cache control */
1697

    
1698
        /* These values must always be read as zeros */
1699
        val &= ~CACHE_CTRL_FD;
1700
        val &= ~CACHE_CTRL_FI;
1701
        val &= ~CACHE_CTRL_IB;
1702
        val &= ~CACHE_CTRL_IP;
1703
        val &= ~CACHE_CTRL_DP;
1704

    
1705
        env->cache_control = val;
1706
        break;
1707
    case 0x04:              /* Instruction cache configuration */
1708
    case 0x08:              /* Data cache configuration */
1709
        /* Read Only */
1710
        break;
1711
    default:
1712
        DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
1713
        break;
1714
    };
1715
}
1716

    
1717
static uint64_t leon3_cache_control_ld(target_ulong addr, int size)
1718
{
1719
    uint64_t ret = 0;
1720

    
1721
    if (size != 4) {
1722
        DPRINTF_CACHE_CONTROL("32bits only\n");
1723
        return 0;
1724
    }
1725

    
1726
    switch (addr) {
1727
    case 0x00:              /* Cache control */
1728
        ret = env->cache_control;
1729
        break;
1730

    
1731
        /* Configuration registers are read-only and always return these
           fixed values */
1733

    
1734
    case 0x04:              /* Instruction cache configuration */
1735
        ret = 0x10220000;
1736
        break;
1737
    case 0x08:              /* Data cache configuration */
1738
        ret = 0x18220000;
1739
        break;
1740
    default:
1741
        DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
1742
        break;
1743
    };
1744
    DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
1745
                          addr, ret, size);
1746
    return ret;
1747
}
1748

    
1749
void leon3_irq_manager(void *irq_manager, int intno)
1750
{
1751
    leon3_irq_ack(irq_manager, intno);
1752
    leon3_cache_control_int();
1753
}
1754

    
1755
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1756
{
1757
    uint64_t ret = 0;
1758
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
1759
    uint32_t last_addr = addr;
1760
#endif
1761

    
1762
    helper_check_align(addr, size - 1);
1763
    switch (asi) {
1764
    case 2: /* SuperSparc MXCC registers and Leon3 cache control */
1765
        switch (addr) {
1766
        case 0x00:          /* Leon3 Cache Control */
1767
        case 0x08:          /* Leon3 Instruction Cache config */
1768
        case 0x0C:          /* Leon3 Date Cache config */
1769
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
1770
                ret = leon3_cache_control_ld(addr, size);
1771
            }
1772
            break;
1773
        case 0x01c00a00: /* MXCC control register */
1774
            if (size == 8)
1775
                ret = env->mxccregs[3];
1776
            else
1777
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1778
                             size);
1779
            break;
1780
        case 0x01c00a04: /* MXCC control register */
1781
            if (size == 4)
1782
                ret = env->mxccregs[3];
1783
            else
1784
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1785
                             size);
1786
            break;
1787
        case 0x01c00c00: /* Module reset register */
1788
            if (size == 8) {
1789
                ret = env->mxccregs[5];
1790
                // should we do something here?
1791
            } else
1792
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1793
                             size);
1794
            break;
1795
        case 0x01c00f00: /* MBus port address register */
1796
            if (size == 8)
1797
                ret = env->mxccregs[7];
1798
            else
1799
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1800
                             size);
1801
            break;
1802
        default:
1803
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1804
                         size);
1805
            break;
1806
        }
1807
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
1808
                     "addr = %08x -> ret = %" PRIx64 ","
1809
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
1810
#ifdef DEBUG_MXCC
1811
        dump_mxcc(env);
1812
#endif
1813
        break;
1814
    case 3: /* MMU probe */
1815
        {
1816
            int mmulev;
1817

    
1818
            mmulev = (addr >> 8) & 15;
1819
            if (mmulev > 4)
1820
                ret = 0;
1821
            else
1822
                ret = mmu_probe(env, addr, mmulev);
1823
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
1824
                        addr, mmulev, ret);
1825
        }
1826
        break;
1827
    case 4: /* read MMU regs */
1828
        {
1829
            int reg = (addr >> 8) & 0x1f;
1830

    
1831
            ret = env->mmuregs[reg];
1832
            if (reg == 3) /* Fault status cleared on read */
1833
                env->mmuregs[3] = 0;
1834
            else if (reg == 0x13) /* Fault status read */
1835
                ret = env->mmuregs[3];
1836
            else if (reg == 0x14) /* Fault address read */
1837
                ret = env->mmuregs[4];
1838
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
1839
        }
1840
        break;
1841
    case 5: // Turbosparc ITLB Diagnostic
1842
    case 6: // Turbosparc DTLB Diagnostic
1843
    case 7: // Turbosparc IOTLB Diagnostic
1844
        break;
1845
    case 9: /* Supervisor code access */
1846
        switch(size) {
1847
        case 1:
1848
            ret = ldub_code(addr);
1849
            break;
1850
        case 2:
1851
            ret = lduw_code(addr);
1852
            break;
1853
        default:
1854
        case 4:
1855
            ret = ldl_code(addr);
1856
            break;
1857
        case 8:
1858
            ret = ldq_code(addr);
1859
            break;
1860
        }
1861
        break;
1862
    case 0xa: /* User data access */
1863
        switch(size) {
1864
        case 1:
1865
            ret = ldub_user(addr);
1866
            break;
1867
        case 2:
1868
            ret = lduw_user(addr);
1869
            break;
1870
        default:
1871
        case 4:
1872
            ret = ldl_user(addr);
1873
            break;
1874
        case 8:
1875
            ret = ldq_user(addr);
1876
            break;
1877
        }
1878
        break;
1879
    case 0xb: /* Supervisor data access */
1880
        switch(size) {
1881
        case 1:
1882
            ret = ldub_kernel(addr);
1883
            break;
1884
        case 2:
1885
            ret = lduw_kernel(addr);
1886
            break;
1887
        default:
1888
        case 4:
1889
            ret = ldl_kernel(addr);
1890
            break;
1891
        case 8:
1892
            ret = ldq_kernel(addr);
1893
            break;
1894
        }
1895
        break;
1896
    case 0xc: /* I-cache tag */
1897
    case 0xd: /* I-cache data */
1898
    case 0xe: /* D-cache tag */
1899
    case 0xf: /* D-cache data */
1900
        break;
1901
    case 0x20: /* MMU passthrough */
1902
        switch(size) {
1903
        case 1:
1904
            ret = ldub_phys(addr);
1905
            break;
1906
        case 2:
1907
            ret = lduw_phys(addr);
1908
            break;
1909
        default:
1910
        case 4:
1911
            ret = ldl_phys(addr);
1912
            break;
1913
        case 8:
1914
            ret = ldq_phys(addr);
1915
            break;
1916
        }
1917
        break;
1918
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1919
        switch(size) {
1920
        case 1:
1921
            ret = ldub_phys((target_phys_addr_t)addr
1922
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1923
            break;
1924
        case 2:
1925
            ret = lduw_phys((target_phys_addr_t)addr
1926
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1927
            break;
1928
        default:
1929
        case 4:
1930
            ret = ldl_phys((target_phys_addr_t)addr
1931
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1932
            break;
1933
        case 8:
1934
            ret = ldq_phys((target_phys_addr_t)addr
1935
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1936
            break;
1937
        }
1938
        break;
1939
    case 0x30: // Turbosparc secondary cache diagnostic
1940
    case 0x31: // Turbosparc RAM snoop
1941
    case 0x32: // Turbosparc page table descriptor diagnostic
1942
    case 0x39: /* data cache diagnostic register */
1943
    case 0x4c: /* SuperSPARC MMU Breakpoint Action register */
1944
        ret = 0;
1945
        break;
1946
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
1947
        {
1948
            int reg = (addr >> 8) & 3;
1949

    
1950
            switch(reg) {
1951
            case 0: /* Breakpoint Value (Addr) */
1952
                ret = env->mmubpregs[reg];
1953
                break;
1954
            case 1: /* Breakpoint Mask */
1955
                ret = env->mmubpregs[reg];
1956
                break;
1957
            case 2: /* Breakpoint Control */
1958
                ret = env->mmubpregs[reg];
1959
                break;
1960
            case 3: /* Breakpoint Status */
1961
                ret = env->mmubpregs[reg];
1962
                env->mmubpregs[reg] = 0ULL;
1963
                break;
1964
            }
1965
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
1966
                        ret);
1967
        }
1968
        break;
1969
    case 8: /* User code access, XXX */
1970
    default:
1971
        do_unassigned_access(addr, 0, 0, asi, size);
1972
        ret = 0;
1973
        break;
1974
    }
1975
    if (sign) {
1976
        switch(size) {
1977
        case 1:
1978
            ret = (int8_t) ret;
1979
            break;
1980
        case 2:
1981
            ret = (int16_t) ret;
1982
            break;
1983
        case 4:
1984
            ret = (int32_t) ret;
1985
            break;
1986
        default:
1987
            break;
1988
        }
1989
    }
1990
#ifdef DEBUG_ASI
1991
    dump_asi("read ", last_addr, asi, size, ret);
1992
#endif
1993
    return ret;
1994
}
1995

    
1996
void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1997
{
1998
    helper_check_align(addr, size - 1);
1999
    switch(asi) {
2000
    case 2: /* SuperSparc MXCC registers and Leon3 cache control */
2001
        switch (addr) {
2002
        case 0x00:          /* Leon3 Cache Control */
2003
        case 0x08:          /* Leon3 Instruction Cache config */
2004
        case 0x0C:          /* Leon3 Date Cache config */
2005
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
2006
                leon3_cache_control_st(addr, val, size);
2007
            }
2008
            break;
2009

    
2010
        case 0x01c00000: /* MXCC stream data register 0 */
2011
            if (size == 8)
2012
                env->mxccdata[0] = val;
2013
            else
2014
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2015
                             size);
2016
            break;
2017
        case 0x01c00008: /* MXCC stream data register 1 */
2018
            if (size == 8)
2019
                env->mxccdata[1] = val;
2020
            else
2021
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2022
                             size);
2023
            break;
2024
        case 0x01c00010: /* MXCC stream data register 2 */
2025
            if (size == 8)
2026
                env->mxccdata[2] = val;
2027
            else
2028
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2029
                             size);
2030
            break;
2031
        case 0x01c00018: /* MXCC stream data register 3 */
2032
            if (size == 8)
2033
                env->mxccdata[3] = val;
2034
            else
2035
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2036
                             size);
2037
            break;
2038
        case 0x01c00100: /* MXCC stream source */
2039
            if (size == 8)
2040
                env->mxccregs[0] = val;
2041
            else
2042
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2043
                             size);
2044
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2045
                                        0);
2046
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2047
                                        8);
2048
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2049
                                        16);
2050
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2051
                                        24);
2052
            break;
2053
        case 0x01c00200: /* MXCC stream destination */
2054
            if (size == 8)
2055
                env->mxccregs[1] = val;
2056
            else
2057
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2058
                             size);
2059
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  0,
2060
                     env->mxccdata[0]);
2061
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  8,
2062
                     env->mxccdata[1]);
2063
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
2064
                     env->mxccdata[2]);
2065
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
2066
                     env->mxccdata[3]);
2067
            break;
2068
        case 0x01c00a00: /* MXCC control register */
2069
            if (size == 8)
2070
                env->mxccregs[3] = val;
2071
            else
2072
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2073
                             size);
2074
            break;
2075
        case 0x01c00a04: /* MXCC control register */
2076
            if (size == 4)
2077
                env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
2078
                    | val;
2079
            else
2080
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2081
                             size);
2082
            break;
2083
        case 0x01c00e00: /* MXCC error register  */
2084
            // writing a 1 bit clears the error
2085
            if (size == 8)
2086
                env->mxccregs[6] &= ~val;
2087
            else
2088
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2089
                             size);
2090
            break;
2091
        case 0x01c00f00: /* MBus port address register */
2092
            if (size == 8)
2093
                env->mxccregs[7] = val;
2094
            else
2095
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2096
                             size);
2097
            break;
2098
        default:
2099
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
2100
                         size);
2101
            break;
2102
        }
2103
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
2104
                     asi, size, addr, val);
2105
#ifdef DEBUG_MXCC
2106
        dump_mxcc(env);
2107
#endif
2108
        break;
2109
    case 3: /* MMU flush */
2110
        {
2111
            int mmulev;
2112

    
2113
            mmulev = (addr >> 8) & 15;
2114
            DPRINTF_MMU("mmu flush level %d\n", mmulev);
2115
            switch (mmulev) {
2116
            case 0: // flush page
2117
                tlb_flush_page(env, addr & 0xfffff000);
2118
                break;
2119
            case 1: // flush segment (256k)
2120
            case 2: // flush region (16M)
2121
            case 3: // flush context (4G)
2122
            case 4: // flush entire
2123
                tlb_flush(env, 1);
2124
                break;
2125
            default:
2126
                break;
2127
            }
2128
#ifdef DEBUG_MMU
2129
            dump_mmu(stdout, fprintf, env);
2130
#endif
2131
        }
2132
        break;
2133
    case 4: /* write MMU regs */
2134
        {
2135
            int reg = (addr >> 8) & 0x1f;
2136
            uint32_t oldreg;
2137

    
2138
            oldreg = env->mmuregs[reg];
2139
            switch(reg) {
2140
            case 0: // Control Register
2141
                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
2142
                                    (val & 0x00ffffff);
2143
                // Mappings generated during no-fault mode or MMU
2144
                // disabled mode are invalid in normal mode
2145
                if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
2146
                    (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
2147
                    tlb_flush(env, 1);
2148
                break;
2149
            case 1: // Context Table Pointer Register
2150
                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
2151
                break;
2152
            case 2: // Context Register
2153
                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
2154
                if (oldreg != env->mmuregs[reg]) {
2155
                    /* we flush when the MMU context changes because
2156
                       QEMU has no MMU context support */
2157
                    tlb_flush(env, 1);
2158
                }
2159
                break;
2160
            case 3: // Synchronous Fault Status Register with Clear
2161
            case 4: // Synchronous Fault Address Register
2162
                break;
2163
            case 0x10: // TLB Replacement Control Register
2164
                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
2165
                break;
2166
            case 0x13: // Synchronous Fault Status Register with Read and Clear
2167
                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
2168
                break;
2169
            case 0x14: // Synchronous Fault Address Register
2170
                env->mmuregs[4] = val;
2171
                break;
2172
            default:
2173
                env->mmuregs[reg] = val;
2174
                break;
2175
            }
2176
            if (oldreg != env->mmuregs[reg]) {
2177
                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
2178
                            reg, oldreg, env->mmuregs[reg]);
2179
            }
2180
#ifdef DEBUG_MMU
2181
            dump_mmu(stdout, fprintf, env);
2182
#endif
2183
        }
2184
        break;
2185
    case 5: // Turbosparc ITLB Diagnostic
2186
    case 6: // Turbosparc DTLB Diagnostic
2187
    case 7: // Turbosparc IOTLB Diagnostic
2188
        break;
2189
    case 0xa: /* User data access */
2190
        switch(size) {
2191
        case 1:
2192
            stb_user(addr, val);
2193
            break;
2194
        case 2:
2195
            stw_user(addr, val);
2196
            break;
2197
        default:
2198
        case 4:
2199
            stl_user(addr, val);
2200
            break;
2201
        case 8:
2202
            stq_user(addr, val);
2203
            break;
2204
        }
2205
        break;
2206
    case 0xb: /* Supervisor data access */
2207
        switch(size) {
2208
        case 1:
2209
            stb_kernel(addr, val);
2210
            break;
2211
        case 2:
2212
            stw_kernel(addr, val);
2213
            break;
2214
        default:
2215
        case 4:
2216
            stl_kernel(addr, val);
2217
            break;
2218
        case 8:
2219
            stq_kernel(addr, val);
2220
            break;
2221
        }
2222
        break;
2223
    case 0xc: /* I-cache tag */
2224
    case 0xd: /* I-cache data */
2225
    case 0xe: /* D-cache tag */
2226
    case 0xf: /* D-cache data */
2227
    case 0x10: /* I/D-cache flush page */
2228
    case 0x11: /* I/D-cache flush segment */
2229
    case 0x12: /* I/D-cache flush region */
2230
    case 0x13: /* I/D-cache flush context */
2231
    case 0x14: /* I/D-cache flush user */
2232
        break;
2233
    case 0x17: /* Block copy, sta access */
2234
        {
2235
            // val = src
2236
            // addr = dst
2237
            // copy 32 bytes
2238
            unsigned int i;
2239
            uint32_t src = val & ~3, dst = addr & ~3, temp;
2240

    
2241
            for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
2242
                temp = ldl_kernel(src);
2243
                stl_kernel(dst, temp);
2244
            }
2245
        }
2246
        break;
2247
    case 0x1f: /* Block fill, stda access */
2248
        {
2249
            // addr = dst
2250
            // fill 32 bytes with val
2251
            unsigned int i;
2252
            uint32_t dst = addr & 7;
2253

    
2254
            for (i = 0; i < 32; i += 8, dst += 8)
2255
                stq_kernel(dst, val);
2256
        }
2257
        break;
2258
    case 0x20: /* MMU passthrough */
2259
        {
2260
            switch(size) {
2261
            case 1:
2262
                stb_phys(addr, val);
2263
                break;
2264
            case 2:
2265
                stw_phys(addr, val);
2266
                break;
2267
            case 4:
2268
            default:
2269
                stl_phys(addr, val);
2270
                break;
2271
            case 8:
2272
                stq_phys(addr, val);
2273
                break;
2274
            }
2275
        }
2276
        break;
2277
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
2278
        {
2279
            switch(size) {
2280
            case 1:
2281
                stb_phys((target_phys_addr_t)addr
2282
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2283
                break;
2284
            case 2:
2285
                stw_phys((target_phys_addr_t)addr
2286
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2287
                break;
2288
            case 4:
2289
            default:
2290
                stl_phys((target_phys_addr_t)addr
2291
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2292
                break;
2293
            case 8:
2294
                stq_phys((target_phys_addr_t)addr
2295
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2296
                break;
2297
            }
2298
        }
2299
        break;
2300
    case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
2301
    case 0x31: // store buffer data, Ross RT620 I-cache flush or
2302
               // Turbosparc snoop RAM
2303
    case 0x32: // store buffer control or Turbosparc page table
2304
               // descriptor diagnostic
2305
    case 0x36: /* I-cache flash clear */
2306
    case 0x37: /* D-cache flash clear */
2307
    case 0x4c: /* breakpoint action */
2308
        break;
2309
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
2310
        {
2311
            int reg = (addr >> 8) & 3;
2312

    
2313
            switch(reg) {
2314
            case 0: /* Breakpoint Value (Addr) */
2315
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
2316
                break;
2317
            case 1: /* Breakpoint Mask */
2318
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
2319
                break;
2320
            case 2: /* Breakpoint Control */
2321
                env->mmubpregs[reg] = (val & 0x7fULL);
2322
                break;
2323
            case 3: /* Breakpoint Status */
2324
                env->mmubpregs[reg] = (val & 0xfULL);
2325
                break;
2326
            }
2327
            DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
2328
                        env->mmuregs[reg]);
2329
        }
2330
        break;
2331
    case 8: /* User code access, XXX */
2332
    case 9: /* Supervisor code access, XXX */
2333
    default:
2334
        do_unassigned_access(addr, 1, 0, asi, size);
2335
        break;
2336
    }
2337
#ifdef DEBUG_ASI
2338
    dump_asi("write", addr, asi, size, val);
2339
#endif
2340
}
2341

    
2342
#endif /* CONFIG_USER_ONLY */
2343
#else /* TARGET_SPARC64 */
2344

    
2345
#ifdef CONFIG_USER_ONLY
2346
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
2347
{
2348
    uint64_t ret = 0;
2349
#if defined(DEBUG_ASI)
2350
    target_ulong last_addr = addr;
2351
#endif
2352

    
2353
    if (asi < 0x80)
2354
        raise_exception(TT_PRIV_ACT);
2355

    
2356
    helper_check_align(addr, size - 1);
2357
    addr = asi_address_mask(env, asi, addr);
2358

    
2359
    switch (asi) {
2360
    case 0x82: // Primary no-fault
2361
    case 0x8a: // Primary no-fault LE
2362
        if (page_check_range(addr, size, PAGE_READ) == -1) {
2363
#ifdef DEBUG_ASI
2364
            dump_asi("read ", last_addr, asi, size, ret);
2365
#endif
2366
            return 0;
2367
        }
2368
        // Fall through
2369
    case 0x80: // Primary
2370
    case 0x88: // Primary LE
2371
        {
2372
            switch(size) {
2373
            case 1:
2374
                ret = ldub_raw(addr);
2375
                break;
2376
            case 2:
2377
                ret = lduw_raw(addr);
2378
                break;
2379
            case 4:
2380
                ret = ldl_raw(addr);
2381
                break;
2382
            default:
2383
            case 8:
2384
                ret = ldq_raw(addr);
2385
                break;
2386
            }
2387
        }
2388
        break;
2389
    case 0x83: // Secondary no-fault
2390
    case 0x8b: // Secondary no-fault LE
2391
        if (page_check_range(addr, size, PAGE_READ) == -1) {
2392
#ifdef DEBUG_ASI
2393
            dump_asi("read ", last_addr, asi, size, ret);
2394
#endif
2395
            return 0;
2396
        }
2397
        // Fall through
2398
    case 0x81: // Secondary
2399
    case 0x89: // Secondary LE
2400
        // XXX
2401
        break;
2402
    default:
2403
        break;
2404
    }
2405

    
2406
    /* Convert from little endian */
2407
    switch (asi) {
2408
    case 0x88: // Primary LE
2409
    case 0x89: // Secondary LE
2410
    case 0x8a: // Primary no-fault LE
2411
    case 0x8b: // Secondary no-fault LE
2412
        switch(size) {
2413
        case 2:
2414
            ret = bswap16(ret);
2415
            break;
2416
        case 4:
2417
            ret = bswap32(ret);
2418
            break;
2419
        case 8:
2420
            ret = bswap64(ret);
2421
            break;
2422
        default:
2423
            break;
2424
        }
2425
    default:
2426
        break;
2427
    }
2428

    
2429
    /* Convert to signed number */
2430
    if (sign) {
2431
        switch(size) {
2432
        case 1:
2433
            ret = (int8_t) ret;
2434
            break;
2435
        case 2:
2436
            ret = (int16_t) ret;
2437
            break;
2438
        case 4:
2439
            ret = (int32_t) ret;
2440
            break;
2441
        default:
2442
            break;
2443
        }
2444
    }
2445
#ifdef DEBUG_ASI
2446
    dump_asi("read ", last_addr, asi, size, ret);
2447
#endif
2448
    return ret;
2449
}
2450

    
2451
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
2452
{
2453
#ifdef DEBUG_ASI
2454
    dump_asi("write", addr, asi, size, val);
2455
#endif
2456
    if (asi < 0x80)
2457
        raise_exception(TT_PRIV_ACT);
2458

    
2459
    helper_check_align(addr, size - 1);
2460
    addr = asi_address_mask(env, asi, addr);
2461

    
2462
    /* Convert to little endian */
2463
    switch (asi) {
2464
    case 0x88: // Primary LE
2465
    case 0x89: // Secondary LE
2466
        switch(size) {
2467
        case 2:
2468
            val = bswap16(val);
2469
            break;
2470
        case 4:
2471
            val = bswap32(val);
2472
            break;
2473
        case 8:
2474
            val = bswap64(val);
2475
            break;
2476
        default:
2477
            break;
2478
        }
2479
    default:
2480
        break;
2481
    }
2482

    
2483
    switch(asi) {
2484
    case 0x80: // Primary
2485
    case 0x88: // Primary LE
2486
        {
2487
            switch(size) {
2488
            case 1:
2489
                stb_raw(addr, val);
2490
                break;
2491
            case 2:
2492
                stw_raw(addr, val);
2493
                break;
2494
            case 4:
2495
                stl_raw(addr, val);
2496
                break;
2497
            case 8:
2498
            default:
2499
                stq_raw(addr, val);
2500
                break;
2501
            }
2502
        }
2503
        break;
2504
    case 0x81: // Secondary
2505
    case 0x89: // Secondary LE
2506
        // XXX
2507
        return;
2508

    
2509
    case 0x82: // Primary no-fault, RO
2510
    case 0x83: // Secondary no-fault, RO
2511
    case 0x8a: // Primary no-fault LE, RO
2512
    case 0x8b: // Secondary no-fault LE, RO
2513
    default:
2514
        do_unassigned_access(addr, 1, 0, 1, size);
2515
        return;
2516
    }
2517
}
2518

    
2519
#else /* CONFIG_USER_ONLY */
2520

    
2521
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
2522
{
2523
    uint64_t ret = 0;
2524
#if defined(DEBUG_ASI)
2525
    target_ulong last_addr = addr;
2526
#endif
2527

    
2528
    asi &= 0xff;
2529

    
2530
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2531
        || (cpu_has_hypervisor(env)
2532
            && asi >= 0x30 && asi < 0x80
2533
            && !(env->hpstate & HS_PRIV)))
2534
        raise_exception(TT_PRIV_ACT);
2535

    
2536
    helper_check_align(addr, size - 1);
2537
    addr = asi_address_mask(env, asi, addr);
2538

    
2539
    switch (asi) {
2540
    case 0x82: // Primary no-fault
2541
    case 0x8a: // Primary no-fault LE
2542
    case 0x83: // Secondary no-fault
2543
    case 0x8b: // Secondary no-fault LE
2544
        {
2545
            /* secondary space access has lowest asi bit equal to 1 */
2546
            int access_mmu_idx = ( asi & 1 ) ? MMU_KERNEL_IDX
2547
                                             : MMU_KERNEL_SECONDARY_IDX;
2548

    
2549
            if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
2550
#ifdef DEBUG_ASI
2551
                dump_asi("read ", last_addr, asi, size, ret);
2552
#endif
2553
                return 0;
2554
            }
2555
        }
2556
        // Fall through
2557
    case 0x10: // As if user primary
2558
    case 0x11: // As if user secondary
2559
    case 0x18: // As if user primary LE
2560
    case 0x19: // As if user secondary LE
2561
    case 0x80: // Primary
2562
    case 0x81: // Secondary
2563
    case 0x88: // Primary LE
2564
    case 0x89: // Secondary LE
2565
    case 0xe2: // UA2007 Primary block init
2566
    case 0xe3: // UA2007 Secondary block init
2567
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
2568
            if (cpu_hypervisor_mode(env)) {
2569
                switch(size) {
2570
                case 1:
2571
                    ret = ldub_hypv(addr);
2572
                    break;
2573
                case 2:
2574
                    ret = lduw_hypv(addr);
2575
                    break;
2576
                case 4:
2577
                    ret = ldl_hypv(addr);
2578
                    break;
2579
                default:
2580
                case 8:
2581
                    ret = ldq_hypv(addr);
2582
                    break;
2583
                }
2584
            } else {
2585
                /* secondary space access has lowest asi bit equal to 1 */
2586
                if (asi & 1) {
2587
                    switch(size) {
2588
                    case 1:
2589
                        ret = ldub_kernel_secondary(addr);
2590
                        break;
2591
                    case 2:
2592
                        ret = lduw_kernel_secondary(addr);
2593
                        break;
2594
                    case 4:
2595
                        ret = ldl_kernel_secondary(addr);
2596
                        break;
2597
                    default:
2598
                    case 8:
2599
                        ret = ldq_kernel_secondary(addr);
2600
                        break;
2601
                    }
2602
                } else {
2603
                    switch(size) {
2604
                    case 1:
2605
                        ret = ldub_kernel(addr);
2606
                        break;
2607
                    case 2:
2608
                        ret = lduw_kernel(addr);
2609
                        break;
2610
                    case 4:
2611
                        ret = ldl_kernel(addr);
2612
                        break;
2613
                    default:
2614
                    case 8:
2615
                        ret = ldq_kernel(addr);
2616
                        break;
2617
                    }
2618
                }
2619
            }
2620
        } else {
2621
            /* secondary space access has lowest asi bit equal to 1 */
2622
            if (asi & 1) {
2623
                switch(size) {
2624
                case 1:
2625
                    ret = ldub_user_secondary(addr);
2626
                    break;
2627
                case 2:
2628
                    ret = lduw_user_secondary(addr);
2629
                    break;
2630
                case 4:
2631
                    ret = ldl_user_secondary(addr);
2632
                    break;
2633
                default:
2634
                case 8:
2635
                    ret = ldq_user_secondary(addr);
2636
                    break;
2637
                }
2638
            } else {
2639
                switch(size) {
2640
                case 1:
2641
                    ret = ldub_user(addr);
2642
                    break;
2643
                case 2:
2644
                    ret = lduw_user(addr);
2645
                    break;
2646
                case 4:
2647
                    ret = ldl_user(addr);
2648
                    break;
2649
                default:
2650
                case 8:
2651
                    ret = ldq_user(addr);
2652
                    break;
2653
                }
2654
            }
2655
        }
2656
        break;
2657
    case 0x14: // Bypass
2658
    case 0x15: // Bypass, non-cacheable
2659
    case 0x1c: // Bypass LE
2660
    case 0x1d: // Bypass, non-cacheable LE
2661
        {
2662
            switch(size) {
2663
            case 1:
2664
                ret = ldub_phys(addr);
2665
                break;
2666
            case 2:
2667
                ret = lduw_phys(addr);
2668
                break;
2669
            case 4:
2670
                ret = ldl_phys(addr);
2671
                break;
2672
            default:
2673
            case 8:
2674
                ret = ldq_phys(addr);
2675
                break;
2676
            }
2677
            break;
2678
        }
2679
    case 0x24: // Nucleus quad LDD 128 bit atomic
2680
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2681
        //  Only ldda allowed
2682
        raise_exception(TT_ILL_INSN);
2683
        return 0;
2684
    case 0x04: // Nucleus
2685
    case 0x0c: // Nucleus Little Endian (LE)
2686
    {
2687
        switch(size) {
2688
        case 1:
2689
            ret = ldub_nucleus(addr);
2690
            break;
2691
        case 2:
2692
            ret = lduw_nucleus(addr);
2693
            break;
2694
        case 4:
2695
            ret = ldl_nucleus(addr);
2696
            break;
2697
        default:
2698
        case 8:
2699
            ret = ldq_nucleus(addr);
2700
            break;
2701
        }
2702
        break;
2703
    }
2704
    case 0x4a: // UPA config
2705
        // XXX
2706
        break;
2707
    case 0x45: // LSU
2708
        ret = env->lsu;
2709
        break;
2710
    case 0x50: // I-MMU regs
2711
        {
2712
            int reg = (addr >> 3) & 0xf;
2713

    
2714
            if (reg == 0) {
2715
                // I-TSB Tag Target register
2716
                ret = ultrasparc_tag_target(env->immu.tag_access);
2717
            } else {
2718
                ret = env->immuregs[reg];
2719
            }
2720

    
2721
            break;
2722
        }
2723
    case 0x51: // I-MMU 8k TSB pointer
2724
        {
2725
            // env->immuregs[5] holds I-MMU TSB register value
2726
            // env->immuregs[6] holds I-MMU Tag Access register value
2727
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
2728
                                         8*1024);
2729
            break;
2730
        }
2731
    case 0x52: // I-MMU 64k TSB pointer
2732
        {
2733
            // env->immuregs[5] holds I-MMU TSB register value
2734
            // env->immuregs[6] holds I-MMU Tag Access register value
2735
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
2736
                                         64*1024);
2737
            break;
2738
        }
2739
    case 0x55: // I-MMU data access
2740
        {
2741
            int reg = (addr >> 3) & 0x3f;
2742

    
2743
            ret = env->itlb[reg].tte;
2744
            break;
2745
        }
2746
    case 0x56: // I-MMU tag read
2747
        {
2748
            int reg = (addr >> 3) & 0x3f;
2749

    
2750
            ret = env->itlb[reg].tag;
2751
            break;
2752
        }
2753
    case 0x58: // D-MMU regs
2754
        {
2755
            int reg = (addr >> 3) & 0xf;
2756

    
2757
            if (reg == 0) {
2758
                // D-TSB Tag Target register
2759
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
2760
            } else {
2761
                ret = env->dmmuregs[reg];
2762
            }
2763
            break;
2764
        }
2765
    case 0x59: // D-MMU 8k TSB pointer
2766
        {
2767
            // env->dmmuregs[5] holds D-MMU TSB register value
2768
            // env->dmmuregs[6] holds D-MMU Tag Access register value
2769
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
2770
                                         8*1024);
2771
            break;
2772
        }
2773
    case 0x5a: // D-MMU 64k TSB pointer
2774
        {
2775
            // env->dmmuregs[5] holds D-MMU TSB register value
2776
            // env->dmmuregs[6] holds D-MMU Tag Access register value
2777
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
2778
                                         64*1024);
2779
            break;
2780
        }
2781
    case 0x5d: // D-MMU data access
2782
        {
2783
            int reg = (addr >> 3) & 0x3f;
2784

    
2785
            ret = env->dtlb[reg].tte;
2786
            break;
2787
        }
2788
    case 0x5e: // D-MMU tag read
2789
        {
2790
            int reg = (addr >> 3) & 0x3f;
2791

    
2792
            ret = env->dtlb[reg].tag;
2793
            break;
2794
        }
2795
    case 0x46: // D-cache data
2796
    case 0x47: // D-cache tag access
2797
    case 0x4b: // E-cache error enable
2798
    case 0x4c: // E-cache asynchronous fault status
2799
    case 0x4d: // E-cache asynchronous fault address
2800
    case 0x4e: // E-cache tag data
2801
    case 0x66: // I-cache instruction access
2802
    case 0x67: // I-cache tag access
2803
    case 0x6e: // I-cache predecode
2804
    case 0x6f: // I-cache LRU etc.
2805
    case 0x76: // E-cache tag
2806
    case 0x7e: // E-cache tag
2807
        break;
2808
    case 0x5b: // D-MMU data pointer
2809
    case 0x48: // Interrupt dispatch, RO
2810
    case 0x49: // Interrupt data receive
2811
    case 0x7f: // Incoming interrupt vector, RO
2812
        // XXX
2813
        break;
2814
    case 0x54: // I-MMU data in, WO
2815
    case 0x57: // I-MMU demap, WO
2816
    case 0x5c: // D-MMU data in, WO
2817
    case 0x5f: // D-MMU demap, WO
2818
    case 0x77: // Interrupt vector, WO
2819
    default:
2820
        do_unassigned_access(addr, 0, 0, 1, size);
2821
        ret = 0;
2822
        break;
2823
    }
2824

    
2825
    /* Convert from little endian */
2826
    switch (asi) {
2827
    case 0x0c: // Nucleus Little Endian (LE)
2828
    case 0x18: // As if user primary LE
2829
    case 0x19: // As if user secondary LE
2830
    case 0x1c: // Bypass LE
2831
    case 0x1d: // Bypass, non-cacheable LE
2832
    case 0x88: // Primary LE
2833
    case 0x89: // Secondary LE
2834
    case 0x8a: // Primary no-fault LE
2835
    case 0x8b: // Secondary no-fault LE
2836
        switch(size) {
2837
        case 2:
2838
            ret = bswap16(ret);
2839
            break;
2840
        case 4:
2841
            ret = bswap32(ret);
2842
            break;
2843
        case 8:
2844
            ret = bswap64(ret);
2845
            break;
2846
        default:
2847
            break;
2848
        }
2849
    default:
2850
        break;
2851
    }
2852

    
2853
    /* Convert to signed number */
2854
    if (sign) {
2855
        switch(size) {
2856
        case 1:
2857
            ret = (int8_t) ret;
2858
            break;
2859
        case 2:
2860
            ret = (int16_t) ret;
2861
            break;
2862
        case 4:
2863
            ret = (int32_t) ret;
2864
            break;
2865
        default:
2866
            break;
2867
        }
2868
    }
2869
#ifdef DEBUG_ASI
2870
    dump_asi("read ", last_addr, asi, size, ret);
2871
#endif
2872
    return ret;
2873
}
2874

    
2875
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
2876
{
2877
#ifdef DEBUG_ASI
2878
    dump_asi("write", addr, asi, size, val);
2879
#endif
2880

    
2881
    asi &= 0xff;
2882

    
2883
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2884
        || (cpu_has_hypervisor(env)
2885
            && asi >= 0x30 && asi < 0x80
2886
            && !(env->hpstate & HS_PRIV)))
2887
        raise_exception(TT_PRIV_ACT);
2888

    
2889
    helper_check_align(addr, size - 1);
2890
    addr = asi_address_mask(env, asi, addr);
2891

    
2892
    /* Convert to little endian */
2893
    switch (asi) {
2894
    case 0x0c: // Nucleus Little Endian (LE)
2895
    case 0x18: // As if user primary LE
2896
    case 0x19: // As if user secondary LE
2897
    case 0x1c: // Bypass LE
2898
    case 0x1d: // Bypass, non-cacheable LE
2899
    case 0x88: // Primary LE
2900
    case 0x89: // Secondary LE
2901
        switch(size) {
2902
        case 2:
2903
            val = bswap16(val);
2904
            break;
2905
        case 4:
2906
            val = bswap32(val);
2907
            break;
2908
        case 8:
2909
            val = bswap64(val);
2910
            break;
2911
        default:
2912
            break;
2913
        }
2914
    default:
2915
        break;
2916
    }
2917

    
2918
    switch(asi) {
2919
    case 0x10: // As if user primary
2920
    case 0x11: // As if user secondary
2921
    case 0x18: // As if user primary LE
2922
    case 0x19: // As if user secondary LE
2923
    case 0x80: // Primary
2924
    case 0x81: // Secondary
2925
    case 0x88: // Primary LE
2926
    case 0x89: // Secondary LE
2927
    case 0xe2: // UA2007 Primary block init
2928
    case 0xe3: // UA2007 Secondary block init
2929
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
2930
            if (cpu_hypervisor_mode(env)) {
2931
                switch(size) {
2932
                case 1:
2933
                    stb_hypv(addr, val);
2934
                    break;
2935
                case 2:
2936
                    stw_hypv(addr, val);
2937
                    break;
2938
                case 4:
2939
                    stl_hypv(addr, val);
2940
                    break;
2941
                case 8:
2942
                default:
2943
                    stq_hypv(addr, val);
2944
                    break;
2945
                }
2946
            } else {
2947
                /* secondary space access has lowest asi bit equal to 1 */
2948
                if (asi & 1) {
2949
                    switch(size) {
2950
                    case 1:
2951
                        stb_kernel_secondary(addr, val);
2952
                        break;
2953
                    case 2:
2954
                        stw_kernel_secondary(addr, val);
2955
                        break;
2956
                    case 4:
2957
                        stl_kernel_secondary(addr, val);
2958
                        break;
2959
                    case 8:
2960
                    default:
2961
                        stq_kernel_secondary(addr, val);
2962
                        break;
2963
                    }
2964
                } else {
2965
                    switch(size) {
2966
                    case 1:
2967
                        stb_kernel(addr, val);
2968
                        break;
2969
                    case 2:
2970
                        stw_kernel(addr, val);
2971
                        break;
2972
                    case 4:
2973
                        stl_kernel(addr, val);
2974
                        break;
2975
                    case 8:
2976
                    default:
2977
                        stq_kernel(addr, val);
2978
                        break;
2979
                    }
2980
                }
2981
            }
2982
        } else {
2983
            /* secondary space access has lowest asi bit equal to 1 */
2984
            if (asi & 1) {
2985
                switch(size) {
2986
                case 1:
2987
                    stb_user_secondary(addr, val);
2988
                    break;
2989
                case 2:
2990
                    stw_user_secondary(addr, val);
2991
                    break;
2992
                case 4:
2993
                    stl_user_secondary(addr, val);
2994
                    break;
2995
                case 8:
2996
                default:
2997
                    stq_user_secondary(addr, val);
2998
                    break;
2999
                }
3000
            } else {
3001
                switch(size) {
3002
                case 1:
3003
                    stb_user(addr, val);
3004
                    break;
3005
                case 2:
3006
                    stw_user(addr, val);
3007
                    break;
3008
                case 4:
3009
                    stl_user(addr, val);
3010
                    break;
3011
                case 8:
3012
                default:
3013
                    stq_user(addr, val);
3014
                    break;
3015
                }
3016
            }
3017
        }
3018
        break;
3019
    case 0x14: // Bypass
3020
    case 0x15: // Bypass, non-cacheable
3021
    case 0x1c: // Bypass LE
3022
    case 0x1d: // Bypass, non-cacheable LE
3023
        {
3024
            switch(size) {
3025
            case 1:
3026
                stb_phys(addr, val);
3027
                break;
3028
            case 2:
3029
                stw_phys(addr, val);
3030
                break;
3031
            case 4:
3032
                stl_phys(addr, val);
3033
                break;
3034
            case 8:
3035
            default:
3036
                stq_phys(addr, val);
3037
                break;
3038
            }
3039
        }
3040
        return;
3041
    case 0x24: // Nucleus quad LDD 128 bit atomic
3042
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
3043
        //  Only ldda allowed
3044
        raise_exception(TT_ILL_INSN);
3045
        return;
3046
    case 0x04: // Nucleus
3047
    case 0x0c: // Nucleus Little Endian (LE)
3048
    {
3049
        switch(size) {
3050
        case 1:
3051
            stb_nucleus(addr, val);
3052
            break;
3053
        case 2:
3054
            stw_nucleus(addr, val);
3055
            break;
3056
        case 4:
3057
            stl_nucleus(addr, val);
3058
            break;
3059
        default:
3060
        case 8:
3061
            stq_nucleus(addr, val);
3062
            break;
3063
        }
3064
        break;
3065
    }
3066

    
3067
    case 0x4a: // UPA config
3068
        // XXX
3069
        return;
3070
    case 0x45: // LSU
3071
        {
3072
            uint64_t oldreg;
3073

    
3074
            oldreg = env->lsu;
3075
            env->lsu = val & (DMMU_E | IMMU_E);
3076
            // Mappings generated during D/I MMU disabled mode are
3077
            // invalid in normal mode
3078
            if (oldreg != env->lsu) {
3079
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
3080
                            oldreg, env->lsu);
3081
#ifdef DEBUG_MMU
3082
                dump_mmu(stdout, fprintf, env1);
3083
#endif
3084
                tlb_flush(env, 1);
3085
            }
3086
            return;
3087
        }
3088
    case 0x50: // I-MMU regs
3089
        {
3090
            int reg = (addr >> 3) & 0xf;
3091
            uint64_t oldreg;
3092

    
3093
            oldreg = env->immuregs[reg];
3094
            switch(reg) {
3095
            case 0: // RO
3096
                return;
3097
            case 1: // Not in I-MMU
3098
            case 2:
3099
                return;
3100
            case 3: // SFSR
3101
                if ((val & 1) == 0)
3102
                    val = 0; // Clear SFSR
3103
                env->immu.sfsr = val;
3104
                break;
3105
            case 4: // RO
3106
                return;
3107
            case 5: // TSB access
3108
                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
3109
                            PRIx64 "\n", env->immu.tsb, val);
3110
                env->immu.tsb = val;
3111
                break;
3112
            case 6: // Tag access
3113
                env->immu.tag_access = val;
3114
                break;
3115
            case 7:
3116
            case 8:
3117
                return;
3118
            default:
3119
                break;
3120
            }
3121

    
3122
            if (oldreg != env->immuregs[reg]) {
3123
                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
3124
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
3125
            }
3126
#ifdef DEBUG_MMU
3127
            dump_mmu(stdout, fprintf, env);
3128
#endif
3129
            return;
3130
        }
3131
    case 0x54: // I-MMU data in
3132
        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
3133
        return;
3134
    case 0x55: // I-MMU data access
3135
        {
3136
            // TODO: auto demap
3137

    
3138
            unsigned int i = (addr >> 3) & 0x3f;
3139

    
3140
            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
3141

    
3142
#ifdef DEBUG_MMU
3143
            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
3144
            dump_mmu(stdout, fprintf, env);
3145
#endif
3146
            return;
3147
        }
3148
    case 0x57: // I-MMU demap
3149
        demap_tlb(env->itlb, addr, "immu", env);
3150
        return;
3151
    case 0x58: // D-MMU regs
3152
        {
3153
            int reg = (addr >> 3) & 0xf;
3154
            uint64_t oldreg;
3155

    
3156
            oldreg = env->dmmuregs[reg];
3157
            switch(reg) {
3158
            case 0: // RO
3159
            case 4:
3160
                return;
3161
            case 3: // SFSR
3162
                if ((val & 1) == 0) {
3163
                    val = 0; // Clear SFSR, Fault address
3164
                    env->dmmu.sfar = 0;
3165
                }
3166
                env->dmmu.sfsr = val;
3167
                break;
3168
            case 1: // Primary context
3169
                env->dmmu.mmu_primary_context = val;
3170
                /* can be optimized to only flush MMU_USER_IDX
3171
                   and MMU_KERNEL_IDX entries */
3172
                tlb_flush(env, 1);
3173
                break;
3174
            case 2: // Secondary context
3175
                env->dmmu.mmu_secondary_context = val;
3176
                /* can be optimized to only flush MMU_USER_SECONDARY_IDX
3177
                   and MMU_KERNEL_SECONDARY_IDX entries */
3178
                tlb_flush(env, 1);
3179
                break;
3180
            case 5: // TSB access
3181
                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
3182
                            PRIx64 "\n", env->dmmu.tsb, val);
3183
                env->dmmu.tsb = val;
3184
                break;
3185
            case 6: // Tag access
3186
                env->dmmu.tag_access = val;
3187
                break;
3188
            case 7: // Virtual Watchpoint
3189
            case 8: // Physical Watchpoint
3190
            default:
3191
                env->dmmuregs[reg] = val;
3192
                break;
3193
            }
3194

    
3195
            if (oldreg != env->dmmuregs[reg]) {
3196
                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
3197
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
3198
            }
3199
#ifdef DEBUG_MMU
3200
            dump_mmu(stdout, fprintf, env);
3201
#endif
3202
            return;
3203
        }
3204
    case 0x5c: // D-MMU data in
3205
        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
3206
        return;
3207
    case 0x5d: // D-MMU data access
3208
        {
3209
            unsigned int i = (addr >> 3) & 0x3f;
3210

    
3211
            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
3212

    
3213
#ifdef DEBUG_MMU
3214
            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
3215
            dump_mmu(stdout, fprintf, env);
3216
#endif
3217
            return;
3218
        }
3219
    case 0x5f: // D-MMU demap
3220
        demap_tlb(env->dtlb, addr, "dmmu", env);
3221
        return;
3222
    case 0x49: // Interrupt data receive
3223
        // XXX
3224
        return;
3225
    case 0x46: // D-cache data
3226
    case 0x47: // D-cache tag access
3227
    case 0x4b: // E-cache error enable
3228
    case 0x4c: // E-cache asynchronous fault status
3229
    case 0x4d: // E-cache asynchronous fault address
3230
    case 0x4e: // E-cache tag data
3231
    case 0x66: // I-cache instruction access
3232
    case 0x67: // I-cache tag access
3233
    case 0x6e: // I-cache predecode
3234
    case 0x6f: // I-cache LRU etc.
3235
    case 0x76: // E-cache tag
3236
    case 0x7e: // E-cache tag
3237
        return;
3238
    case 0x51: // I-MMU 8k TSB pointer, RO
3239
    case 0x52: // I-MMU 64k TSB pointer, RO
3240
    case 0x56: // I-MMU tag read, RO
3241
    case 0x59: // D-MMU 8k TSB pointer, RO
3242
    case 0x5a: // D-MMU 64k TSB pointer, RO
3243
    case 0x5b: // D-MMU data pointer, RO
3244
    case 0x5e: // D-MMU tag read, RO
3245
    case 0x48: // Interrupt dispatch, RO
3246
    case 0x7f: // Incoming interrupt vector, RO
3247
    case 0x82: // Primary no-fault, RO
3248
    case 0x83: // Secondary no-fault, RO
3249
    case 0x8a: // Primary no-fault LE, RO
3250
    case 0x8b: // Secondary no-fault LE, RO
3251
    default:
3252
        do_unassigned_access(addr, 1, 0, 1, size);
3253
        return;
3254
    }
3255
}
3256
#endif /* CONFIG_USER_ONLY */
3257

    
3258
void helper_ldda_asi(target_ulong addr, int asi, int rd)
3259
{
3260
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
3261
        || (cpu_has_hypervisor(env)
3262
            && asi >= 0x30 && asi < 0x80
3263
            && !(env->hpstate & HS_PRIV)))
3264
        raise_exception(TT_PRIV_ACT);
3265

    
3266
    addr = asi_address_mask(env, asi, addr);
3267

    
3268
    switch (asi) {
3269
#if !defined(CONFIG_USER_ONLY)
3270
    case 0x24: // Nucleus quad LDD 128 bit atomic
3271
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
3272
        helper_check_align(addr, 0xf);
3273
        if (rd == 0) {
3274
            env->gregs[1] = ldq_nucleus(addr + 8);
3275
            if (asi == 0x2c)
3276
                bswap64s(&env->gregs[1]);
3277
        } else if (rd < 8) {
3278
            env->gregs[rd] = ldq_nucleus(addr);
3279
            env->gregs[rd + 1] = ldq_nucleus(addr + 8);
3280
            if (asi == 0x2c) {
3281
                bswap64s(&env->gregs[rd]);
3282
                bswap64s(&env->gregs[rd + 1]);
3283
            }
3284
        } else {
3285
            env->regwptr[rd] = ldq_nucleus(addr);
3286
            env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
3287
            if (asi == 0x2c) {
3288
                bswap64s(&env->regwptr[rd]);
3289
                bswap64s(&env->regwptr[rd + 1]);
3290
            }
3291
        }
3292
        break;
3293
#endif
3294
    default:
3295
        helper_check_align(addr, 0x3);
3296
        if (rd == 0)
3297
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
3298
        else if (rd < 8) {
3299
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
3300
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
3301
        } else {
3302
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
3303
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
3304
        }
3305
        break;
3306
    }
3307
}
3308

    
3309
void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xf0: // Block load primary
    case 0xf1: // Block load secondary
    case 0xf8: // Block load primary LE
    case 0xf9: // Block load secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    case 0x70: // Block load primary, user privilege
    case 0x71: // Block load secondary, user privilege
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x1f, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    val = helper_ld_asi(addr, asi, size, 0);
    switch (size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = val;
        break;
    case 8:
        *((int64_t *)&DT0) = val;
        break;
    case 16:
        // XXX
        break;
    }
}

void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xe0: // UA2007 Block commit store primary (cache flush)
    case 0xe1: // UA2007 Block commit store secondary (cache flush)
    case 0xf0: // Block store primary
    case 0xf1: // Block store secondary
    case 0xf8: // Block store primary LE
    case 0xf9: // Block store secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    case 0x70: // Block store primary, user privilege
    case 0x71: // Block store secondary, user privilege
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x1f, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch (size) {
    default:
    case 4:
        val = *((uint32_t *)&env->fpr[rd]);
        break;
    case 8:
        val = *((int64_t *)&DT0);
        break;
    case 16:
        // XXX
        break;
    }
    helper_st_asi(addr, val, asi, size);
}

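/* Compare-and-swap: load the current value through the given ASI, store
   val1 only if the loaded value equals val2, and always return the old
   value so the guest can tell whether the swap took place. */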
target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
                            target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    val2 &= 0xffffffffUL;
    ret = helper_ld_asi(addr, asi, 4, 0);
    ret &= 0xffffffffUL;
    if (val2 == ret)
        helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
    return ret;
}

target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
                             target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    ret = helper_ld_asi(addr, asi, 8, 0);
    if (val2 == ret)
        helper_st_asi(addr, val1, asi, 8);
    return ret;
}
#endif /* TARGET_SPARC64 */

#ifndef TARGET_SPARC64
void helper_rett(void)
{
    unsigned int cwp;

    if (env->psret == 1)
        raise_exception(TT_ILL_INSN);

    env->psret = 1;
    cwp = cwp_inc(env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
    env->psrs = env->psrps;
}
#endif

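/* V8 UDIV/SDIV: the 64-bit dividend is Y in the high word and the first
   operand in the low word; the quotient is clamped to 32 bits and the
   *_cc variants record the clamping as the overflow (V) condition.
   Worked example: Y = 1, a = 0, b = 0x10 divides 0x100000000 by 0x10,
   giving 0x10000000 with no overflow. */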
static target_ulong helper_udiv_common(target_ulong a, target_ulong b, int cc)
{
    int overflow = 0;
    uint64_t x0;
    uint32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = (b & 0xffffffff);

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if (x0 > 0xffffffff) {
        x0 = 0xffffffff;
        overflow = 1;
    }

    if (cc) {
        env->cc_dst = x0;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return x0;
}

target_ulong helper_udiv(target_ulong a, target_ulong b)
{
    return helper_udiv_common(a, b, 0);
}

target_ulong helper_udiv_cc(target_ulong a, target_ulong b)
{
    return helper_udiv_common(a, b, 1);
}

static target_ulong helper_sdiv_common(target_ulong a, target_ulong b, int cc)
{
    int overflow = 0;
    int64_t x0;
    int32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = (b & 0xffffffff);

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if ((int32_t) x0 != x0) {
        x0 = x0 < 0 ? 0x80000000 : 0x7fffffff;
        overflow = 1;
    }

    if (cc) {
        env->cc_dst = x0;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return x0;
}

target_ulong helper_sdiv(target_ulong a, target_ulong b)
{
    return helper_sdiv_common(a, b, 0);
}

target_ulong helper_sdiv_cc(target_ulong a, target_ulong b)
{
    return helper_sdiv_common(a, b, 1);
}

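/* FP double/quad memory helpers: in system mode the access functions are
   picked by mem_idx (user, kernel or, on sparc64, hypervisor context);
   in user mode the address is masked and accessed directly. */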
void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        stfq_user(addr, DT0);
        break;
    case MMU_KERNEL_IDX:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    stfq_raw(address_mask(env, addr), DT0);
#endif
}

void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        DT0 = ldfq_user(addr);
        break;
    case MMU_KERNEL_IDX:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    DT0 = ldfq_raw(address_mask(env, addr));
#endif
}

void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case MMU_KERNEL_IDX:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.ll.upper = ldq_raw(address_mask(env, addr));
    u.ll.lower = ldq_raw(address_mask(env, addr + 8));
    QT0 = u.q;
#endif
}

void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case MMU_KERNEL_IDX:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.q = QT0;
    stq_raw(address_mask(env, addr), u.ll.upper);
    stq_raw(address_mask(env, addr + 8), u.ll.lower);
#endif
}

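/* Mirror the FSR rounding-direction field (FSR.RD) into the softfloat
   status so subsequent FP operations round as the guest requested. */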
static inline void set_fsr(void)
{
    int rnd_mode;

    switch (env->fsr & FSR_RD_MASK) {
    case FSR_RD_NEAREST:
        rnd_mode = float_round_nearest_even;
        break;
    default:
    case FSR_RD_ZERO:
        rnd_mode = float_round_to_zero;
        break;
    case FSR_RD_POS:
        rnd_mode = float_round_up;
        break;
    case FSR_RD_NEG:
        rnd_mode = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_mode, &env->fp_status);
}

void helper_ldfsr(uint32_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
    set_fsr();
}

#ifdef TARGET_SPARC64
void helper_ldxfsr(uint64_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
    set_fsr();
}
#endif

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

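/* Register window bookkeeping.  The V8 variants below consult the window
   invalid mask (WIM) and raise overflow/underflow traps; the V9 variants
   in the #else branch track CANSAVE/CANRESTORE/OTHERWIN/CLEANWIN and
   raise the spill/fill/clean-window trap vectors instead. */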
#ifndef TARGET_SPARC64
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cwp_dec(env->cwp - 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_OVF);
    }
    set_cwp(cwp);
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cwp_inc(env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
}

void helper_wrpsr(target_ulong new_psr)
{
    if ((new_psr & PSR_CWP) >= env->nwindows) {
        raise_exception(TT_ILL_INSN);
    } else {
        cpu_put_psr(env, new_psr);
    }
}

target_ulong helper_rdpsr(void)
{
    return get_psr();
}

#else
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cwp_dec(env->cwp - 1);
    if (env->cansave == 0) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            // XXX Clean windows without trap
            raise_exception(TT_CLRWIN);
        } else {
            env->cansave--;
            env->canrestore++;
            set_cwp(cwp);
        }
    }
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cwp_inc(env->cwp + 1);
    if (env->canrestore == 0) {
        raise_exception(TT_FILL | (env->otherwin != 0 ?
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                   ((env->wstate & 0x7) << 2)));
    } else {
        env->cansave++;
        env->canrestore--;
        set_cwp(cwp);
    }
}

void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}

void helper_saved(void)
{
    env->cansave++;
    if (env->otherwin == 0)
        env->canrestore--;
    else
        env->otherwin--;
}

void helper_restored(void)
{
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    if (env->otherwin == 0)
        env->cansave--;
    else
        env->otherwin--;
}

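/* The V9 CCR register is xcc in bits 7:4 and icc in bits 3:0, while this
   file keeps icc in PSR format (bits 23:20) and xcc in the same bit
   positions of env->xcc; hence the 20-bit shifts below.  Example: an icc
   of Z=1 only (PSR bits = 0x00400000) reads back as CCR = 0x04. */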
static target_ulong get_ccr(void)
{
    target_ulong psr;

    psr = get_psr();

    return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
}

target_ulong cpu_get_ccr(CPUState *env1)
{
    CPUState *saved_env;
    target_ulong ret;

    saved_env = env;
    env = env1;
    ret = get_ccr();
    env = saved_env;
    return ret;
}

static void put_ccr(target_ulong val)
{
    target_ulong tmp = val;

    env->xcc = (tmp >> 4) << 20;
    env->psr = (tmp & 0xf) << 20;
    CC_OP = CC_OP_FLAGS;
}

void cpu_put_ccr(CPUState *env1, target_ulong val)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    put_ccr(val);
    env = saved_env;
}

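/* V9 numbers register windows in the opposite direction from the V8-style
   index kept in env->cwp, so CWP is translated as nwindows - 1 - cwp in
   both directions: with nwindows = 8, internal cwp 0 corresponds to the
   architectural CWP 7 and vice versa. */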
static target_ulong get_cwp64(void)
{
    return env->nwindows - 1 - env->cwp;
}

target_ulong cpu_get_cwp64(CPUState *env1)
{
    CPUState *saved_env;
    target_ulong ret;

    saved_env = env;
    env = env1;
    ret = get_cwp64();
    env = saved_env;
    return ret;
}

static void put_cwp64(int cwp)
{
    if (unlikely(cwp >= env->nwindows || cwp < 0)) {
        cwp %= env->nwindows;
    }
    set_cwp(env->nwindows - 1 - cwp);
}

void cpu_put_cwp64(CPUState *env1, int cwp)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    put_cwp64(cwp);
    env = saved_env;
}

target_ulong helper_rdccr(void)
{
    return get_ccr();
}

void helper_wrccr(target_ulong new_ccr)
{
    put_ccr(new_ccr);
}

// CWP handling is reversed in V9, but we still use the V8 register
// order.
target_ulong helper_rdcwp(void)
{
    return get_cwp64();
}

void helper_wrcwp(target_ulong new_cwp)
{
    put_cwp64(new_cwp);
}

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))

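/* Worked example: GET_FIELD_SP(x, 11, 12) is (x >> 11) & 3, i.e. bits
   12..11 with bit 0 as the least significant bit, so
   GET_FIELD_SP(0x1800, 11, 12) == 3. */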
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}

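/* alignaddr records the low three bits of addr + offset in the low bits
   of GSR and returns the 8-byte aligned base: e.g. addr = 0x1003,
   offset = 2 leaves 5 in GSR[2:0] and returns 0x1000. */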
target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
{
    uint64_t tmp;

    tmp = addr + offset;
    env->gsr &= ~7ULL;
    env->gsr |= tmp & 7ULL;
    return tmp & ~7ULL;
}

target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}

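/* V9 provides alternate banks of global registers selected by the AG, MG
   and IG bits of PSTATE.  get_gregset() returns the backing storage for a
   given combination and change_pstate() swaps the active bank in and out
   of env->gregs whenever those bits change. */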
static inline uint64_t *get_gregset(uint32_t pstate)
{
    switch (pstate) {
    default:
        DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
                pstate,
                (pstate & PS_IG) ? " IG" : "",
                (pstate & PS_MG) ? " MG" : "",
                (pstate & PS_AG) ? " AG" : "");
        /* fall through to the normal set of global registers */
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}

static inline void change_pstate(uint32_t new_pstate)
{
    uint32_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    if (env->def->features & CPU_FEATURE_GL) {
        // PS_AG is not implemented in this case
        new_pstate &= ~PS_AG;
    }

    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;

    if (new_pstate_regs != pstate_regs) {
        DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
                       pstate_regs, new_pstate_regs);
        // Switch global register bank
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    } else {
        DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
                       new_pstate_regs);
    }
    env->pstate = new_pstate;
}

void helper_wrpstate(target_ulong new_state)
{
    change_pstate(new_state & 0xf3f);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

void helper_wrpil(target_ulong new_pil)
{
#if !defined(CONFIG_USER_ONLY)
    DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
                   env->psrpil, (uint32_t)new_pil);

    env->psrpil = new_pil;

    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

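/* DONE and RETRY leave a trap handler by popping the top trap state:
   both restore CCR, ASI, PSTATE and CWP from TSTATE and decrement TL;
   DONE resumes at TNPC (after the trapping instruction) while RETRY
   re-executes it from TPC. */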
void helper_done(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tnpc;
    env->npc = tsptr->tnpc + 4;
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

void helper_retry(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tpc;
    env->npc = tsptr->tnpc;
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

static void do_modify_softint(const char* operation, uint32_t value)
{
    if (env->softint != value) {
        env->softint = value;
        DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
#if !defined(CONFIG_USER_ONLY)
        if (cpu_interrupts_enabled(env)) {
            cpu_check_irqs(env);
        }
#endif
    }
}

void helper_set_softint(uint64_t value)
{
    do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
}

void helper_clear_softint(uint64_t value)
{
    do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
}

void helper_write_softint(uint64_t value)
{
    do_modify_softint("helper_write_softint", (uint32_t)value);
}
#endif

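/* FLUSH: the guest may have written code at this address, so invalidate
   any translated block covering the 8-byte aligned doubleword. */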
void helper_flush(target_ulong addr)
{
    addr &= ~7;
    tb_invalidate_page_range(addr, addr + 8);
}

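/* Trap and interrupt delivery.  The excp_names tables are only used for
   CPU_LOG_INT logging under DEBUG_PCALL; do_interrupt() itself saves the
   interrupted state and vectors execution through TBR. */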
#ifdef TARGET_SPARC64
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_TMISS] = "Instruction Access MMU Miss",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_TOVF] = "Tag Overflow",
    [TT_CLRWIN] = "Clean Windows",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_DFAULT] = "Data Access Fault",
    [TT_DMISS] = "Data Access MMU Miss",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DPROT] = "Data Protection Error",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_PRIV_ACT] = "Privileged Action",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
};
#endif

trap_state* cpu_tsptr(CPUState* env)
{
    return &env->ts[env->tl & MAXTL_MASK];
}

void do_interrupt(CPUState *env)
{
    int intno = env->exception_index;
    trap_state* tsptr;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x180)
            name = "Unknown";
        else if (intno >= 0x100)
            name = "Trap Instruction";
        else if (intno >= 0xc0)
            name = "Window Fill";
        else if (intno >= 0x80)
            name = "Window Spill";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
                " SP=%016" PRIx64 "\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->tl >= env->maxtl) {
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", env->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl)
            env->tl++;
    }
    tsptr = cpu_tsptr(env);

    tsptr->tstate = (get_ccr() << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        get_cwp64();
    tsptr->tpc = env->pc;
    tsptr->tnpc = env->npc;
    tsptr->tt = intno;

    switch (intno) {
    case TT_IVEC:
        change_pstate(PS_PEF | PS_PRIV | PS_IG);
        break;
    case TT_TFAULT:
    case TT_DFAULT:
    case TT_TMISS ... TT_TMISS + 3:
    case TT_DMISS ... TT_DMISS + 3:
    case TT_DPROT ... TT_DPROT + 3:
        change_pstate(PS_PEF | PS_PRIV | PS_MG);
        break;
    default:
        change_pstate(PS_PEF | PS_PRIV | PS_AG);
        break;
    }

    if (intno == TT_CLRWIN) {
        set_cwp(cwp_dec(env->cwp - 1));
    } else if ((intno & 0x1c0) == TT_SPILL) {
        set_cwp(cwp_dec(env->cwp - env->cansave - 2));
    } else if ((intno & 0x1c0) == TT_FILL) {
        set_cwp(cwp_inc(env->cwp + 1));
    }
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;
}
#else
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_WIN_OVF] = "Window Overflow",
    [TT_WIN_UNF] = "Window Underflow",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_DFAULT] = "Data Access Fault",
    [TT_TOVF] = "Tag Overflow",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_NCP_INSN] = "Coprocessor Disabled",
};
#endif

void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x100)
            name = "Unknown";
        else if (intno >= 0x80)
            name = "Trap Instruction";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->psret == 0) {
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
                  env->exception_index);
        return;
    }
#endif
    env->psret = 0;
    cwp = cwp_dec(env->cwp - 1);
    set_cwp(cwp);
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;
    env->psrs = 1;
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
    /* IRQ acknowledgment */
    if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) {
        env->qemu_irq_ack(env->irq_manager, intno);
    }
#endif
}
#endif

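/* Softmmu slow path: the templates included below expand into the
   per-size load/store helpers, and the fault handlers that follow restore
   the CPU state from the host return address before raising the
   corresponding SPARC trap. */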
#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* XXX: make it generic ? */
static void cpu_restore_state2(void *retaddr)
{
    TranslationBlock *tb;
    unsigned long pc;

    if (retaddr) {
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
        if (tb) {
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc);
        }
    }
}

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        cpu_restore_state2(retaddr);
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif /* !CONFIG_USER_ONLY */

#ifndef TARGET_SPARC64
#if !defined(CONFIG_USER_ONLY)
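/* On sparc32 an unassigned access records the fault in the SRMMU fault
   status register (mmuregs[3]) and fault address register (mmuregs[4]),
   then raises an instruction or data access error unless the MMU is
   disabled or in no-fault mode. */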
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;
    int fault_type;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
    /* Don't overwrite translation and access faults */
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
    if ((fault_type > 4) || (fault_type == 0)) {
        env->mmuregs[3] = 0; /* Fault status register */
        if (is_asi)
            env->mmuregs[3] |= 1 << 16;
        if (env->psrs)
            env->mmuregs[3] |= 1 << 5;
        if (is_exec)
            env->mmuregs[3] |= 1 << 6;
        if (is_write)
            env->mmuregs[3] |= 1 << 7;
        env->mmuregs[3] |= (5 << 2) | 2;
        /* SuperSPARC will never place instruction fault addresses in the FAR */
        if (!is_exec) {
            env->mmuregs[4] = addr; /* Fault address register */
        }
    }
    /* overflow (same type fault was not read before another fault) */
    if (fault_type == ((env->mmuregs[3] & 0x1c) >> 2)) {
        env->mmuregs[3] |= 1;
    }

    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }

    /* flush neverland mappings created during no-fault mode,
       so the sequential MMU faults report proper fault types */
    if (env->mmuregs[0] & MMU_NF) {
        tlb_flush(env, 1);
    }

    env = saved_env;
}
#endif
#else
#if defined(CONFIG_USER_ONLY)
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                          int is_asi, int size)
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
#endif
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
#endif

    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);

    env = saved_env;
}
#endif


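/* %tick / %stick accessors: in user-mode emulation there is no timer
   device behind these, so the setters do nothing and the getter returns
   zero. */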
#ifdef TARGET_SPARC64
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}

uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}

void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
#endif