/*
 * target-sparc/op_helper.c @ revision 073a0444
 * (SPARC helper routines; text recovered from a repository browser view)
 */

1
#include "exec.h"
2
#include "host-utils.h"
3
#include "helper.h"
4
#include "sysemu.h"
5

    
6
//#define DEBUG_MMU
7
//#define DEBUG_MXCC
8
//#define DEBUG_UNALIGNED
9
//#define DEBUG_UNASSIGNED
10
//#define DEBUG_ASI
11
//#define DEBUG_PCALL
12
//#define DEBUG_PSTATE
13
//#define DEBUG_CACHE_CONTROL
14

    
15
#ifdef DEBUG_MMU
16
#define DPRINTF_MMU(fmt, ...)                                   \
17
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
18
#else
19
#define DPRINTF_MMU(fmt, ...) do {} while (0)
20
#endif
21

    
22
#ifdef DEBUG_MXCC
23
#define DPRINTF_MXCC(fmt, ...)                                  \
24
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
25
#else
26
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
27
#endif
28

    
29
#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...)                                   \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#else
/* Match the other DPRINTF_* macros above: expand to a no-op when the
   corresponding DEBUG_* switch is off, so call sites do not need their
   own #ifdef DEBUG_ASI guards. */
#define DPRINTF_ASI(fmt, ...) do {} while (0)
#endif
33

    
34
#ifdef DEBUG_PSTATE
35
#define DPRINTF_PSTATE(fmt, ...)                                   \
36
    do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
37
#else
38
#define DPRINTF_PSTATE(fmt, ...) do {} while (0)
39
#endif
40

    
41
#ifdef DEBUG_CACHE_CONTROL
42
#define DPRINTF_CACHE_CONTROL(fmt, ...)                                   \
43
    do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
44
#else
45
#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
46
#endif
47

    
48
#ifdef TARGET_SPARC64
49
#ifndef TARGET_ABI32
50
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
51
#else
52
#define AM_CHECK(env1) (1)
53
#endif
54
#endif
55

    
56
#define DT0 (env->dt0)
57
#define DT1 (env->dt1)
58
#define QT0 (env->qt0)
59
#define QT1 (env->qt1)
60

    
61
/* Leon3 cache control */
62

    
63
/* Cache control: emulate the behavior of cache control registers but without
64
   any effect on the emulated */
65

    
66
#define CACHE_STATE_MASK 0x3
67
#define CACHE_DISABLED   0x0
68
#define CACHE_FROZEN     0x1
69
#define CACHE_ENABLED    0x3
70

    
71
/* Cache Control register fields */
72

    
73
#define CACHE_CTRL_IF (1 <<  4)  /* Instruction Cache Freeze on Interrupt */
74
#define CACHE_CTRL_DF (1 <<  5)  /* Data Cache Freeze on Interrupt */
75
#define CACHE_CTRL_DP (1 << 14)  /* Data cache flush pending */
76
#define CACHE_CTRL_IP (1 << 15)  /* Instruction cache flush pending */
77
#define CACHE_CTRL_IB (1 << 16)  /* Instruction burst fetch */
78
#define CACHE_CTRL_FI (1 << 21)  /* Flush Instruction cache (Write only) */
79
#define CACHE_CTRL_FD (1 << 22)  /* Flush Data cache (Write only) */
80
#define CACHE_CTRL_DS (1 << 23)  /* Data cache snoop enable */
81

    
82
#if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
83
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
84
                          int is_asi, int size);
85
#endif
86

    
87
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
88
/* Compute the TSB pointer for a fault with an 8k or 64k page size.
   Mirrors UltraSPARC TSB pointer formation: the VA bits from the tag
   access register are shifted into index position and combined with
   the TSB base under a mask derived from the size and split fields. */
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    uint64_t base = tsb_register & ~0x1fffULL;
    int split = (tsb_register & 0x1000ULL) != 0;
    int size = tsb_register & 0xf;

    /* The low 13 bits of the tag access register hold the context,
       not address bits: discard them. */
    uint64_t va = tag_access_register & ~0x1fffULL;

    uint64_t base_mask = ~0x1fffULL;

    /* Shift the VA bits into index position for this page size. */
    switch (page_size) {
    case 8 * 1024:
        va >>= 9;
        break;
    case 64 * 1024:
        va >>= 12;
        break;
    default:
        break;
    }

    if (size) {
        base_mask <<= size;
    }

    /* With a split TSB the 8k and 64k entries live in opposite halves
       of a doubled table: force the dividing bit and widen the mask. */
    if (split) {
        if (page_size == 8 * 1024) {
            va &= ~(1ULL << (13 + size));
        } else if (page_size == 64 * 1024) {
            va |= 1ULL << (13 + size);
        }
        base_mask <<= 1;
    }

    return ((base & base_mask) | (va & ~base_mask)) & ~0xfULL;
}
127

    
128
/* Form the tag target register value from the tag access register:
   the context (low 13 bits) moves to the top of the word and the VA
   field drops to the bottom. */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    uint64_t context = tag_access_register & 0x1fffULL;
    uint64_t va_field = tag_access_register >> 22;

    return (context << 48) | va_field;
}
134

    
135
static void replace_tlb_entry(SparcTLBEntry *tlb,
136
                              uint64_t tlb_tag, uint64_t tlb_tte,
137
                              CPUState *env1)
138
{
139
    target_ulong mask, size, va, offset;
140

    
141
    // flush page range if translation is valid
142
    if (TTE_IS_VALID(tlb->tte)) {
143

    
144
        mask = 0xffffffffffffe000ULL;
145
        mask <<= 3 * ((tlb->tte >> 61) & 3);
146
        size = ~mask + 1;
147

    
148
        va = tlb->tag & mask;
149

    
150
        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
151
            tlb_flush_page(env1, va + offset);
152
        }
153
    }
154

    
155
    tlb->tag = tlb_tag;
156
    tlb->tte = tlb_tte;
157
}
158

    
159
/* Invalidate matching entries of a 64-entry TLB.
   demap_addr bit 6 selects "demap context" vs "demap page"; bits 5:4
   select which context register supplies the comparison context. */
static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char* strmmu, CPUState *env1)
{
    unsigned int idx;
    target_ulong page_mask;
    uint64_t ctx;
    int demap_context = (demap_addr >> 6) & 1;

    switch ((demap_addr >> 4) & 3) {
    case 0: /* primary */
        ctx = env1->dmmu.mmu_primary_context;
        break;
    case 1: /* secondary */
        ctx = env1->dmmu.mmu_secondary_context;
        break;
    case 2: /* nucleus */
        ctx = 0;
        break;
    default: /* 3 is reserved: ignore the demap */
        return;
    }

    for (idx = 0; idx < 64; idx++) {
        if (!TTE_IS_VALID(tlb[idx].tte)) {
            continue;
        }

        if (demap_context) {
            /* Demap context: remove non-global entries whose context
               matches the selected context register. */
            if (TTE_IS_GLOBAL(tlb[idx].tte) ||
                !tlb_compare_context(&tlb[idx], ctx)) {
                continue;
            }
        } else {
            /* Demap page: remove any entry covering the VA, provided
               it is global or its context matches. */
            page_mask = 0xffffffffffffe000ULL;
            page_mask <<= 3 * ((tlb[idx].tte >> 61) & 3);

            if (!compare_masked(demap_addr, tlb[idx].tag, page_mask)) {
                continue;
            }
            if (!TTE_IS_GLOBAL(tlb[idx].tte) &&
                !tlb_compare_context(&tlb[idx], ctx)) {
                continue;
            }
        }

        replace_tlb_entry(&tlb[idx], 0, 0, env1);
#ifdef DEBUG_MMU
        DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, idx);
        dump_mmu(stdout, fprintf, env1);
#endif
    }
}
218

    
219
/* Insert a TTE using the 1-bit-LRU policy: prefer an invalid slot;
   otherwise take the first unlocked slot whose used bit is clear,
   clearing all used bits and retrying once if none qualifies. */
static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char* strmmu, CPUState *env1)
{
    unsigned int idx, pass;

    /* First choice: reuse an invalid entry. */
    for (idx = 0; idx < 64; idx++) {
        if (!TTE_IS_VALID(tlb[idx].tte)) {
            replace_tlb_entry(&tlb[idx], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, idx);
            dump_mmu(stdout, fprintf, env1);
#endif
            return;
        }
    }

    /* All entries valid: look for an unlocked one.  Pass 0 skips
       entries with the used bit set; every used bit is cleared before
       pass 1 retries. */
    for (pass = 0; pass < 2; ++pass) {

        for (idx = 0; idx < 64; idx++) {
            if (!TTE_IS_LOCKED(tlb[idx].tte) && !TTE_IS_USED(tlb[idx].tte)) {
                replace_tlb_entry(&tlb[idx], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
                            strmmu, (pass ? "used" : "unused"), idx);
                dump_mmu(stdout, fprintf, env1);
#endif
                return;
            }
        }

        /* Reset the used bits and search again. */
        for (idx = 0; idx < 64; idx++) {
            TTE_SET_UNUSED(tlb[idx].tte);
        }
    }

#ifdef DEBUG_MMU
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
#endif
    /* error state? */
}
268

    
269
#endif
270

    
271
/* Apply the 32-bit address mask: on sparc64 with AM set (or always for
   the 32-bit ABI) the upper half of a virtual address is ignored. */
static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(env1)) {
        addr &= 0xffffffffULL;
    }
#endif
    return addr;
}
279

    
280
/* returns true if access using this ASI is to have address translated by MMU
   otherwise access is to raw physical address */
static inline int is_translating_asi(int asi)
{
#ifdef TARGET_SPARC64
    /* Ultrasparc IIi translating asi
       - note this list is defined by cpu implementation
     */
    if ((asi >= 0x04 && asi <= 0x11) ||
        (asi >= 0x18 && asi <= 0x19) ||
        (asi >= 0x24 && asi <= 0x2C) ||
        (asi >= 0x70 && asi <= 0x73) ||
        (asi >= 0x78 && asi <= 0x79) ||
        (asi >= 0x80 && asi <= 0xFF)) {
        return 1;
    }
    return 0;
#else
    /* TODO: check sparc32 bits */
    return 0;
#endif
}
305

    
306
/* Mask the address for an ASI access: translating ASIs go through the
   MMU and therefore honour the AM address mask; bypass ASIs carry raw
   physical addresses and are returned untouched. */
static inline target_ulong asi_address_mask(CPUState *env1,
                                            int asi, target_ulong addr)
{
    if (is_translating_asi(asi)) {
        /* Use the env passed by the caller rather than the global env
           register; the original ignored its env1 parameter. */
        return address_mask(env1, addr);
    } else {
        return addr;
    }
}
315

    
316
static void raise_exception(int tt)
317
{
318
    env->exception_index = tt;
319
    cpu_loop_exit(env);
320
}
321

    
322
void HELPER(raise_exception)(int tt)
323
{
324
    raise_exception(tt);
325
}
326

    
327
void helper_shutdown(void)
328
{
329
#if !defined(CONFIG_USER_ONLY)
330
    qemu_system_shutdown_request();
331
#endif
332
}
333

    
334
/* Raise TT_UNALIGNED when addr has any of the bits in `align` set
   (align is the required-alignment mask, e.g. 3 for a word access). */
void helper_check_align(target_ulong addr, uint32_t align)
{
    if ((addr & align) == 0) {
        return;
    }
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    raise_exception(TT_UNALIGNED);
}
344

    
345
#define F_HELPER(name, p) void helper_f##name##p(void)
346

    
347
#define F_BINOP(name)                                           \
348
    float32 helper_f ## name ## s (float32 src1, float32 src2)  \
349
    {                                                           \
350
        return float32_ ## name (src1, src2, &env->fp_status);  \
351
    }                                                           \
352
    F_HELPER(name, d)                                           \
353
    {                                                           \
354
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);     \
355
    }                                                           \
356
    F_HELPER(name, q)                                           \
357
    {                                                           \
358
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);    \
359
    }
360

    
361
F_BINOP(add);
362
F_BINOP(sub);
363
F_BINOP(mul);
364
F_BINOP(div);
365
#undef F_BINOP
366

    
367
void helper_fsmuld(float32 src1, float32 src2)
368
{
369
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
370
                      float32_to_float64(src2, &env->fp_status),
371
                      &env->fp_status);
372
}
373

    
374
void helper_fdmulq(void)
375
{
376
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
377
                       float64_to_float128(DT1, &env->fp_status),
378
                       &env->fp_status);
379
}
380

    
381
/* Negation helpers: flip the sign bit only (no status flags raised). */
float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
F_HELPER(neg, d)
{
    DT0 = float64_chs(DT1);
}

F_HELPER(neg, q)
{
    QT0 = float128_chs(QT1);
}
#endif
397

    
398
/* Integer to float conversion.  */
399
float32 helper_fitos(int32_t src)
400
{
401
    return int32_to_float32(src, &env->fp_status);
402
}
403

    
404
void helper_fitod(int32_t src)
405
{
406
    DT0 = int32_to_float64(src, &env->fp_status);
407
}
408

    
409
void helper_fitoq(int32_t src)
410
{
411
    QT0 = int32_to_float128(src, &env->fp_status);
412
}
413

    
414
#ifdef TARGET_SPARC64
415
float32 helper_fxtos(void)
416
{
417
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
418
}
419

    
420
F_HELPER(xto, d)
421
{
422
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
423
}
424

    
425
F_HELPER(xto, q)
426
{
427
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
428
}
429
#endif
430
#undef F_HELPER
431

    
432
/* floating point conversion */
433
float32 helper_fdtos(void)
434
{
435
    return float64_to_float32(DT1, &env->fp_status);
436
}
437

    
438
void helper_fstod(float32 src)
439
{
440
    DT0 = float32_to_float64(src, &env->fp_status);
441
}
442

    
443
float32 helper_fqtos(void)
444
{
445
    return float128_to_float32(QT1, &env->fp_status);
446
}
447

    
448
void helper_fstoq(float32 src)
449
{
450
    QT0 = float32_to_float128(src, &env->fp_status);
451
}
452

    
453
void helper_fqtod(void)
454
{
455
    DT0 = float128_to_float64(QT1, &env->fp_status);
456
}
457

    
458
void helper_fdtoq(void)
459
{
460
    QT0 = float64_to_float128(DT1, &env->fp_status);
461
}
462

    
463
/* Float to integer conversion.  */
464
int32_t helper_fstoi(float32 src)
465
{
466
    return float32_to_int32_round_to_zero(src, &env->fp_status);
467
}
468

    
469
int32_t helper_fdtoi(void)
470
{
471
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
472
}
473

    
474
int32_t helper_fqtoi(void)
475
{
476
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
477
}
478

    
479
#ifdef TARGET_SPARC64
480
void helper_fstox(float32 src)
481
{
482
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
483
}
484

    
485
void helper_fdtox(void)
486
{
487
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
488
}
489

    
490
void helper_fqtox(void)
491
{
492
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
493
}
494

    
495
void helper_faligndata(void)
496
{
497
    uint64_t tmp;
498

    
499
    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
500
    /* on many architectures a shift of 64 does nothing */
501
    if ((env->gsr & 7) != 0) {
502
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
503
    }
504
    *((uint64_t *)&DT0) = tmp;
505
}
506

    
507
#ifdef HOST_WORDS_BIGENDIAN
508
#define VIS_B64(n) b[7 - (n)]
509
#define VIS_W64(n) w[3 - (n)]
510
#define VIS_SW64(n) sw[3 - (n)]
511
#define VIS_L64(n) l[1 - (n)]
512
#define VIS_B32(n) b[3 - (n)]
513
#define VIS_W32(n) w[1 - (n)]
514
#else
515
#define VIS_B64(n) b[n]
516
#define VIS_W64(n) w[n]
517
#define VIS_SW64(n) sw[n]
518
#define VIS_L64(n) l[n]
519
#define VIS_B32(n) b[n]
520
#define VIS_W32(n) w[n]
521
#endif
522

    
523
typedef union {
524
    uint8_t b[8];
525
    uint16_t w[4];
526
    int16_t sw[4];
527
    uint32_t l[2];
528
    float64 d;
529
} vis64;
530

    
531
typedef union {
532
    uint8_t b[4];
533
    uint16_t w[2];
534
    uint32_t l;
535
    float32 f;
536
} vis32;
537

    
538
void helper_fpmerge(void)
539
{
540
    vis64 s, d;
541

    
542
    s.d = DT0;
543
    d.d = DT1;
544

    
545
    // Reverse calculation order to handle overlap
546
    d.VIS_B64(7) = s.VIS_B64(3);
547
    d.VIS_B64(6) = d.VIS_B64(3);
548
    d.VIS_B64(5) = s.VIS_B64(2);
549
    d.VIS_B64(4) = d.VIS_B64(2);
550
    d.VIS_B64(3) = s.VIS_B64(1);
551
    d.VIS_B64(2) = d.VIS_B64(1);
552
    d.VIS_B64(1) = s.VIS_B64(0);
553
    //d.VIS_B64(0) = d.VIS_B64(0);
554

    
555
    DT0 = d.d;
556
}
557

    
558
void helper_fmul8x16(void)
559
{
560
    vis64 s, d;
561
    uint32_t tmp;
562

    
563
    s.d = DT0;
564
    d.d = DT1;
565

    
566
#define PMUL(r)                                                 \
567
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
568
    if ((tmp & 0xff) > 0x7f)                                    \
569
        tmp += 0x100;                                           \
570
    d.VIS_W64(r) = tmp >> 8;
571

    
572
    PMUL(0);
573
    PMUL(1);
574
    PMUL(2);
575
    PMUL(3);
576
#undef PMUL
577

    
578
    DT0 = d.d;
579
}
580

    
581
void helper_fmul8x16al(void)
582
{
583
    vis64 s, d;
584
    uint32_t tmp;
585

    
586
    s.d = DT0;
587
    d.d = DT1;
588

    
589
#define PMUL(r)                                                 \
590
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
591
    if ((tmp & 0xff) > 0x7f)                                    \
592
        tmp += 0x100;                                           \
593
    d.VIS_W64(r) = tmp >> 8;
594

    
595
    PMUL(0);
596
    PMUL(1);
597
    PMUL(2);
598
    PMUL(3);
599
#undef PMUL
600

    
601
    DT0 = d.d;
602
}
603

    
604
void helper_fmul8x16au(void)
605
{
606
    vis64 s, d;
607
    uint32_t tmp;
608

    
609
    s.d = DT0;
610
    d.d = DT1;
611

    
612
#define PMUL(r)                                                 \
613
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
614
    if ((tmp & 0xff) > 0x7f)                                    \
615
        tmp += 0x100;                                           \
616
    d.VIS_W64(r) = tmp >> 8;
617

    
618
    PMUL(0);
619
    PMUL(1);
620
    PMUL(2);
621
    PMUL(3);
622
#undef PMUL
623

    
624
    DT0 = d.d;
625
}
626

    
627
void helper_fmul8sux16(void)
628
{
629
    vis64 s, d;
630
    uint32_t tmp;
631

    
632
    s.d = DT0;
633
    d.d = DT1;
634

    
635
#define PMUL(r)                                                         \
636
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
637
    if ((tmp & 0xff) > 0x7f)                                            \
638
        tmp += 0x100;                                                   \
639
    d.VIS_W64(r) = tmp >> 8;
640

    
641
    PMUL(0);
642
    PMUL(1);
643
    PMUL(2);
644
    PMUL(3);
645
#undef PMUL
646

    
647
    DT0 = d.d;
648
}
649

    
650
void helper_fmul8ulx16(void)
651
{
652
    vis64 s, d;
653
    uint32_t tmp;
654

    
655
    s.d = DT0;
656
    d.d = DT1;
657

    
658
#define PMUL(r)                                                         \
659
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
660
    if ((tmp & 0xff) > 0x7f)                                            \
661
        tmp += 0x100;                                                   \
662
    d.VIS_W64(r) = tmp >> 8;
663

    
664
    PMUL(0);
665
    PMUL(1);
666
    PMUL(2);
667
    PMUL(3);
668
#undef PMUL
669

    
670
    DT0 = d.d;
671
}
672

    
673
void helper_fmuld8sux16(void)
674
{
675
    vis64 s, d;
676
    uint32_t tmp;
677

    
678
    s.d = DT0;
679
    d.d = DT1;
680

    
681
#define PMUL(r)                                                         \
682
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
683
    if ((tmp & 0xff) > 0x7f)                                            \
684
        tmp += 0x100;                                                   \
685
    d.VIS_L64(r) = tmp;
686

    
687
    // Reverse calculation order to handle overlap
688
    PMUL(1);
689
    PMUL(0);
690
#undef PMUL
691

    
692
    DT0 = d.d;
693
}
694

    
695
void helper_fmuld8ulx16(void)
696
{
697
    vis64 s, d;
698
    uint32_t tmp;
699

    
700
    s.d = DT0;
701
    d.d = DT1;
702

    
703
#define PMUL(r)                                                         \
704
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
705
    if ((tmp & 0xff) > 0x7f)                                            \
706
        tmp += 0x100;                                                   \
707
    d.VIS_L64(r) = tmp;
708

    
709
    // Reverse calculation order to handle overlap
710
    PMUL(1);
711
    PMUL(0);
712
#undef PMUL
713

    
714
    DT0 = d.d;
715
}
716

    
717
void helper_fexpand(void)
718
{
719
    vis32 s;
720
    vis64 d;
721

    
722
    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
723
    d.d = DT1;
724
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
725
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
726
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
727
    d.VIS_W64(3) = s.VIS_B32(3) << 4;
728

    
729
    DT0 = d.d;
730
}
731

    
732
#define VIS_HELPER(name, F)                             \
733
    void name##16(void)                                 \
734
    {                                                   \
735
        vis64 s, d;                                     \
736
                                                        \
737
        s.d = DT0;                                      \
738
        d.d = DT1;                                      \
739
                                                        \
740
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));   \
741
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));   \
742
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));   \
743
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));   \
744
                                                        \
745
        DT0 = d.d;                                      \
746
    }                                                   \
747
                                                        \
748
    uint32_t name##16s(uint32_t src1, uint32_t src2)    \
749
    {                                                   \
750
        vis32 s, d;                                     \
751
                                                        \
752
        s.l = src1;                                     \
753
        d.l = src2;                                     \
754
                                                        \
755
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));   \
756
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));   \
757
                                                        \
758
        return d.l;                                     \
759
    }                                                   \
760
                                                        \
761
    void name##32(void)                                 \
762
    {                                                   \
763
        vis64 s, d;                                     \
764
                                                        \
765
        s.d = DT0;                                      \
766
        d.d = DT1;                                      \
767
                                                        \
768
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));   \
769
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));   \
770
                                                        \
771
        DT0 = d.d;                                      \
772
    }                                                   \
773
                                                        \
774
    uint32_t name##32s(uint32_t src1, uint32_t src2)    \
775
    {                                                   \
776
        vis32 s, d;                                     \
777
                                                        \
778
        s.l = src1;                                     \
779
        d.l = src2;                                     \
780
                                                        \
781
        d.l = F(d.l, s.l);                              \
782
                                                        \
783
        return d.l;                                     \
784
    }
785

    
786
#define FADD(a, b) ((a) + (b))
787
#define FSUB(a, b) ((a) - (b))
788
VIS_HELPER(helper_fpadd, FADD)
789
VIS_HELPER(helper_fpsub, FSUB)
790

    
791
#define VIS_CMPHELPER(name, F)                                        \
792
    void name##16(void)                                           \
793
    {                                                             \
794
        vis64 s, d;                                               \
795
                                                                  \
796
        s.d = DT0;                                                \
797
        d.d = DT1;                                                \
798
                                                                  \
799
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0;       \
800
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0;      \
801
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0;      \
802
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0;      \
803
                                                                  \
804
        DT0 = d.d;                                                \
805
    }                                                             \
806
                                                                  \
807
    void name##32(void)                                           \
808
    {                                                             \
809
        vis64 s, d;                                               \
810
                                                                  \
811
        s.d = DT0;                                                \
812
        d.d = DT1;                                                \
813
                                                                  \
814
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0;       \
815
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0;      \
816
                                                                  \
817
        DT0 = d.d;                                                \
818
    }
819

    
820
#define FCMPGT(a, b) ((a) > (b))
821
#define FCMPEQ(a, b) ((a) == (b))
822
#define FCMPLE(a, b) ((a) <= (b))
823
#define FCMPNE(a, b) ((a) != (b))
824

    
825
VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
826
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
827
VIS_CMPHELPER(helper_fcmple, FCMPLE)
828
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
829
#endif
830

    
831
void helper_check_ieee_exceptions(void)
832
{
833
    target_ulong status;
834

    
835
    status = get_float_exception_flags(&env->fp_status);
836
    if (status) {
837
        /* Copy IEEE 754 flags into FSR */
838
        if (status & float_flag_invalid)
839
            env->fsr |= FSR_NVC;
840
        if (status & float_flag_overflow)
841
            env->fsr |= FSR_OFC;
842
        if (status & float_flag_underflow)
843
            env->fsr |= FSR_UFC;
844
        if (status & float_flag_divbyzero)
845
            env->fsr |= FSR_DZC;
846
        if (status & float_flag_inexact)
847
            env->fsr |= FSR_NXC;
848

    
849
        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
850
            /* Unmasked exception, generate a trap */
851
            env->fsr |= FSR_FTT_IEEE_EXCP;
852
            raise_exception(TT_FP_EXCP);
853
        } else {
854
            /* Accumulate exceptions */
855
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
856
        }
857
    }
858
}
859

    
860
void helper_clear_float_exceptions(void)
861
{
862
    set_float_exception_flags(0, &env->fp_status);
863
}
864

    
865
float32 helper_fabss(float32 src)
866
{
867
    return float32_abs(src);
868
}
869

    
870
#ifdef TARGET_SPARC64
/* Absolute value for double/quad: clear the sign bit only. */
void helper_fabsd(void)
{
    DT0 = float64_abs(DT1);
}

void helper_fabsq(void)
{
    QT0 = float128_abs(QT1);
}
#endif
881

    
882
float32 helper_fsqrts(float32 src)
883
{
884
    return float32_sqrt(src, &env->fp_status);
885
}
886

    
887
void helper_fsqrtd(void)
888
{
889
    DT0 = float64_sqrt(DT1, &env->fp_status);
890
}
891

    
892
void helper_fsqrtq(void)
893
{
894
    QT0 = float128_sqrt(QT1, &env->fp_status);
895
}
896

    
897
/* Compare two FP registers of the given softfloat size and set the
   FCCn field selected by FS.  E enables "compare with exception"
   semantics: any NaN operand traps immediately when NVM is set.  An
   unordered result either traps (NVM set) or sets both FCC bits and
   the NVA accrued-invalid flag. */
#define GEN_FCMP(name, size, reg1, reg2, FS, E)                         \
    void glue(helper_, name) (void)                                     \
    {                                                                   \
        env->fsr &= FSR_FTT_NMASK;                                      \
        if (E && (glue(size, _is_any_nan)(reg1) ||                      \
                     glue(size, _is_any_nan)(reg2)) &&                  \
            (env->fsr & FSR_NVM)) {                                     \
            env->fsr |= FSR_NVC;                                        \
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
            raise_exception(TT_FP_EXCP);                                \
        }                                                               \
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) {   \
        case float_relation_unordered:                                  \
            if ((env->fsr & FSR_NVM)) {                                 \
                env->fsr |= FSR_NVC;                                    \
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
                raise_exception(TT_FP_EXCP);                            \
            } else {                                                    \
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
                env->fsr |= FSR_NVA;                                    \
            }                                                           \
            break;                                                      \
        case float_relation_less:                                       \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC0 << FS;                                 \
            break;                                                      \
        case float_relation_greater:                                    \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC1 << FS;                                 \
            break;                                                      \
        default:                                                        \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            break;                                                      \
        }                                                               \
    }
933
/* Generate a single-precision FP compare helper.
   name: helper suffix; size: softfloat type prefix (float32);
   FS: bit position of the target fcc field in the FSR;
   E: non-zero for the "compare and signal" (FCMPEs) variant, which
   raises an IEEE exception on any NaN operand when FSR.NVM is set.
   The result of the compare is written into the fcc field at FS. */
#define GEN_FCMPS(name, size, FS, E)                                    \
    void glue(helper_, name)(float32 src1, float32 src2)                \
    {                                                                   \
        env->fsr &= FSR_FTT_NMASK;                                      \
        if (E && (glue(size, _is_any_nan)(src1) ||                      \
                     glue(size, _is_any_nan)(src2)) &&                  \
            (env->fsr & FSR_NVM)) {                                     \
            env->fsr |= FSR_NVC;                                        \
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
            raise_exception(TT_FP_EXCP);                                \
        }                                                               \
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) {   \
        case float_relation_unordered:                                  \
            if ((env->fsr & FSR_NVM)) {                                 \
                env->fsr |= FSR_NVC;                                    \
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
                raise_exception(TT_FP_EXCP);                            \
            } else {                                                    \
                /* unordered: fcc = 3 and record the invalid accrual */ \
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
                env->fsr |= FSR_NVA;                                    \
            }                                                           \
            break;                                                      \
        case float_relation_less:                                       \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC0 << FS;                                 \
            break;                                                      \
        case float_relation_greater:                                    \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC1 << FS;                                 \
            break;                                                      \
        default:                                                        \
            /* equal: fcc = 0 */                                        \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            break;                                                      \
        }                                                               \
    }
969

    
970
/* Instantiate the FP compare helpers targeting fcc0 (FSR bit 10).
   The "e" variants signal on NaN operands. */
GEN_FCMPS(fcmps, float32, 0, 0);
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);

GEN_FCMPS(fcmpes, float32, 0, 1);
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);

GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
978

    
979
static uint32_t compute_all_flags(void)
980
{
981
    return env->psr & PSR_ICC;
982
}
983

    
984
static uint32_t compute_C_flags(void)
985
{
986
    return env->psr & PSR_CARRY;
987
}
988

    
989
static inline uint32_t get_NZ_icc(int32_t dst)
990
{
991
    uint32_t ret = 0;
992

    
993
    if (dst == 0) {
994
        ret = PSR_ZERO;
995
    } else if (dst < 0) {
996
        ret = PSR_NEG;
997
    }
998
    return ret;
999
}
1000

    
1001
#ifdef TARGET_SPARC64
/* 64-bit (xcc) variants: env->xcc stores the extended condition codes
   in the same bit layout as the PSR icc field. */
static uint32_t compute_all_flags_xcc(void)
{
    return env->xcc & PSR_ICC;
}

static uint32_t compute_C_flags_xcc(void)
{
    return env->xcc & PSR_CARRY;
}

/* Derive the N and Z xcc bits from a full-width (64-bit) result. */
static inline uint32_t get_NZ_xcc(target_long dst)
{
    uint32_t ret = 0;

    if (!dst) {
        ret = PSR_ZERO;
    } else if (dst < 0) {
        ret = PSR_NEG;
    }
    return ret;
}
#endif
1024

    
1025
static inline uint32_t get_V_div_icc(target_ulong src2)
1026
{
1027
    uint32_t ret = 0;
1028

    
1029
    if (src2 != 0) {
1030
        ret = PSR_OVF;
1031
    }
1032
    return ret;
1033
}
1034

    
1035
static uint32_t compute_all_div(void)
1036
{
1037
    uint32_t ret;
1038

    
1039
    ret = get_NZ_icc(CC_DST);
1040
    ret |= get_V_div_icc(CC_SRC2);
1041
    return ret;
1042
}
1043

    
1044
static uint32_t compute_C_div(void)
1045
{
1046
    return 0;
1047
}
1048

    
1049
/* Carry out of a 32-bit add: dst = src1 + src2 wrapped, so a carry
   occurred iff the result is smaller than an operand. */
static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
{
    uint32_t ret = 0;

    if (dst < src1) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Carry for add-with-carry; the carry-in means dst < src1 alone is not
   sufficient, so use the standard bitwise carry-out formula. */
static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
                                      uint32_t src2)
{
    uint32_t ret = 0;

    if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Signed overflow on add: operands share a sign and the result sign
   differs from it. */
static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
                                     uint32_t src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
        ret = PSR_OVF;
    }
    return ret;
}
1080

    
1081
#ifdef TARGET_SPARC64
/* 64-bit counterparts of the add carry/overflow helpers; identical
   formulas evaluated on bit 63 instead of bit 31. */
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
{
    uint32_t ret = 0;

    if (dst < src1) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
                                         target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
        ret = PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_add_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_add_xcc(void)
{
    return get_C_add_xcc(CC_DST, CC_SRC);
}
#endif
1129

    
1130
static uint32_t compute_all_add(void)
1131
{
1132
    uint32_t ret;
1133

    
1134
    ret = get_NZ_icc(CC_DST);
1135
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1136
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1137
    return ret;
1138
}
1139

    
1140
static uint32_t compute_C_add(void)
1141
{
1142
    return get_C_add_icc(CC_DST, CC_SRC);
1143
}
1144

    
1145
#ifdef TARGET_SPARC64
/* xcc evaluation for add-with-carry. */
static uint32_t compute_all_addx_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_addx_xcc(void)
{
    uint32_t ret;

    ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}
#endif
1164

    
1165
static uint32_t compute_all_addx(void)
1166
{
1167
    uint32_t ret;
1168

    
1169
    ret = get_NZ_icc(CC_DST);
1170
    ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1171
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1172
    return ret;
1173
}
1174

    
1175
static uint32_t compute_C_addx(void)
1176
{
1177
    uint32_t ret;
1178

    
1179
    ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1180
    return ret;
1181
}
1182

    
1183
static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
1184
{
1185
    uint32_t ret = 0;
1186

    
1187
    if ((src1 | src2) & 0x3) {
1188
        ret = PSR_OVF;
1189
    }
1190
    return ret;
1191
}
1192

    
1193
static uint32_t compute_all_tadd(void)
1194
{
1195
    uint32_t ret;
1196

    
1197
    ret = get_NZ_icc(CC_DST);
1198
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1199
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1200
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1201
    return ret;
1202
}
1203

    
1204
static uint32_t compute_all_taddtv(void)
1205
{
1206
    uint32_t ret;
1207

    
1208
    ret = get_NZ_icc(CC_DST);
1209
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1210
    return ret;
1211
}
1212

    
1213
/* Borrow out of a 32-bit subtract: occurs iff src1 < src2. */
static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
{
    uint32_t ret = 0;

    if (src1 < src2) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Borrow for subtract-with-carry, via the bitwise borrow-out formula
   (the carry-in makes the simple comparison insufficient). */
static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
                                      uint32_t src2)
{
    uint32_t ret = 0;

    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Signed overflow on subtract: operands differ in sign and the result
   sign differs from src1. */
static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
                                     uint32_t src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
        ret = PSR_OVF;
    }
    return ret;
}
1244

    
1245

    
1246
#ifdef TARGET_SPARC64
/* 64-bit counterparts of the subtract borrow/overflow helpers. */
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
{
    uint32_t ret = 0;

    if (src1 < src2) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    uint32_t ret = 0;

    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
        ret = PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_sub_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_sub_xcc(void)
{
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
}
#endif
1294

    
1295
static uint32_t compute_all_sub(void)
1296
{
1297
    uint32_t ret;
1298

    
1299
    ret = get_NZ_icc(CC_DST);
1300
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1301
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1302
    return ret;
1303
}
1304

    
1305
static uint32_t compute_C_sub(void)
1306
{
1307
    return get_C_sub_icc(CC_SRC, CC_SRC2);
1308
}
1309

    
1310
#ifdef TARGET_SPARC64
/* xcc evaluation for subtract-with-carry. */
static uint32_t compute_all_subx_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_subx_xcc(void)
{
    uint32_t ret;

    ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}
#endif
1329

    
1330
static uint32_t compute_all_subx(void)
1331
{
1332
    uint32_t ret;
1333

    
1334
    ret = get_NZ_icc(CC_DST);
1335
    ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1336
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1337
    return ret;
1338
}
1339

    
1340
static uint32_t compute_C_subx(void)
1341
{
1342
    uint32_t ret;
1343

    
1344
    ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1345
    return ret;
1346
}
1347

    
1348
static uint32_t compute_all_tsub(void)
1349
{
1350
    uint32_t ret;
1351

    
1352
    ret = get_NZ_icc(CC_DST);
1353
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1354
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1355
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1356
    return ret;
1357
}
1358

    
1359
static uint32_t compute_all_tsubtv(void)
1360
{
1361
    uint32_t ret;
1362

    
1363
    ret = get_NZ_icc(CC_DST);
1364
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1365
    return ret;
1366
}
1367

    
1368
static uint32_t compute_all_logic(void)
1369
{
1370
    return get_NZ_icc(CC_DST);
1371
}
1372

    
1373
static uint32_t compute_C_logic(void)
1374
{
1375
    return 0;
1376
}
1377

    
1378
#ifdef TARGET_SPARC64
1379
static uint32_t compute_all_logic_xcc(void)
1380
{
1381
    return get_NZ_xcc(CC_DST);
1382
}
1383
#endif
1384

    
1385
typedef struct CCTable {
1386
    uint32_t (*compute_all)(void); /* return all the flags */
1387
    uint32_t (*compute_c)(void);  /* return the C flag */
1388
} CCTable;
1389

    
1390
static const CCTable icc_table[CC_OP_NB] = {
1391
    /* CC_OP_DYNAMIC should never happen */
1392
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
1393
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
1394
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
1395
    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
1396
    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
1397
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
1398
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
1399
    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
1400
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
1401
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
1402
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
1403
};
1404

    
1405
#ifdef TARGET_SPARC64
1406
static const CCTable xcc_table[CC_OP_NB] = {
1407
    /* CC_OP_DYNAMIC should never happen */
1408
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
1409
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
1410
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
1411
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
1412
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
1413
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
1414
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1415
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
1416
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1417
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
1418
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
1419
};
1420
#endif
1421

    
1422
void helper_compute_psr(void)
1423
{
1424
    uint32_t new_psr;
1425

    
1426
    new_psr = icc_table[CC_OP].compute_all();
1427
    env->psr = new_psr;
1428
#ifdef TARGET_SPARC64
1429
    new_psr = xcc_table[CC_OP].compute_all();
1430
    env->xcc = new_psr;
1431
#endif
1432
    CC_OP = CC_OP_FLAGS;
1433
}
1434

    
1435
uint32_t helper_compute_C_icc(void)
1436
{
1437
    uint32_t ret;
1438

    
1439
    ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
1440
    return ret;
1441
}
1442

    
1443
static inline void memcpy32(target_ulong *dst, const target_ulong *src)
1444
{
1445
    dst[0] = src[0];
1446
    dst[1] = src[1];
1447
    dst[2] = src[2];
1448
    dst[3] = src[3];
1449
    dst[4] = src[4];
1450
    dst[5] = src[5];
1451
    dst[6] = src[6];
1452
    dst[7] = src[7];
1453
}
1454

    
1455
static void set_cwp(int new_cwp)
1456
{
1457
    /* put the modified wrap registers at their proper location */
1458
    if (env->cwp == env->nwindows - 1) {
1459
        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
1460
    }
1461
    env->cwp = new_cwp;
1462

    
1463
    /* put the wrap registers at their temporary location */
1464
    if (new_cwp == env->nwindows - 1) {
1465
        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
1466
    }
1467
    env->regwptr = env->regbase + (new_cwp * 16);
1468
}
1469

    
1470
void cpu_set_cwp(CPUState *env1, int new_cwp)
1471
{
1472
    CPUState *saved_env;
1473

    
1474
    saved_env = env;
1475
    env = env1;
1476
    set_cwp(new_cwp);
1477
    env = saved_env;
1478
}
1479

    
1480
static target_ulong get_psr(void)
1481
{
1482
    helper_compute_psr();
1483

    
1484
#if !defined (TARGET_SPARC64)
1485
    return env->version | (env->psr & PSR_ICC) |
1486
        (env->psref? PSR_EF : 0) |
1487
        (env->psrpil << 8) |
1488
        (env->psrs? PSR_S : 0) |
1489
        (env->psrps? PSR_PS : 0) |
1490
        (env->psret? PSR_ET : 0) | env->cwp;
1491
#else
1492
    return env->psr & PSR_ICC;
1493
#endif
1494
}
1495

    
1496
target_ulong cpu_get_psr(CPUState *env1)
1497
{
1498
    CPUState *saved_env;
1499
    target_ulong ret;
1500

    
1501
    saved_env = env;
1502
    env = env1;
1503
    ret = get_psr();
1504
    env = saved_env;
1505
    return ret;
1506
}
1507

    
1508
static void put_psr(target_ulong val)
1509
{
1510
    env->psr = val & PSR_ICC;
1511
#if !defined (TARGET_SPARC64)
1512
    env->psref = (val & PSR_EF)? 1 : 0;
1513
    env->psrpil = (val & PSR_PIL) >> 8;
1514
#endif
1515
#if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
1516
    cpu_check_irqs(env);
1517
#endif
1518
#if !defined (TARGET_SPARC64)
1519
    env->psrs = (val & PSR_S)? 1 : 0;
1520
    env->psrps = (val & PSR_PS)? 1 : 0;
1521
    env->psret = (val & PSR_ET)? 1 : 0;
1522
    set_cwp(val & PSR_CWP);
1523
#endif
1524
    env->cc_op = CC_OP_FLAGS;
1525
}
1526

    
1527
void cpu_put_psr(CPUState *env1, target_ulong val)
1528
{
1529
    CPUState *saved_env;
1530

    
1531
    saved_env = env;
1532
    env = env1;
1533
    put_psr(val);
1534
    env = saved_env;
1535
}
1536

    
1537
static int cwp_inc(int cwp)
1538
{
1539
    if (unlikely(cwp >= env->nwindows)) {
1540
        cwp -= env->nwindows;
1541
    }
1542
    return cwp;
1543
}
1544

    
1545
int cpu_cwp_inc(CPUState *env1, int cwp)
1546
{
1547
    CPUState *saved_env;
1548
    target_ulong ret;
1549

    
1550
    saved_env = env;
1551
    env = env1;
1552
    ret = cwp_inc(cwp);
1553
    env = saved_env;
1554
    return ret;
1555
}
1556

    
1557
static int cwp_dec(int cwp)
1558
{
1559
    if (unlikely(cwp < 0)) {
1560
        cwp += env->nwindows;
1561
    }
1562
    return cwp;
1563
}
1564

    
1565
int cpu_cwp_dec(CPUState *env1, int cwp)
1566
{
1567
    CPUState *saved_env;
1568
    target_ulong ret;
1569

    
1570
    saved_env = env;
1571
    env = env1;
1572
    ret = cwp_dec(cwp);
1573
    env = saved_env;
1574
    return ret;
1575
}
1576

    
1577
#ifdef TARGET_SPARC64
/* sparc64 has three extra fcc fields; instantiate compares for each
   (FS = FSR bit position of fcc1/fcc2/fcc3). */
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);

GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);

GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);

GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);

GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);

GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
#endif
#undef GEN_FCMPS
1603

    
1604
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
    defined(DEBUG_MXCC)
/* Debug dump of the SuperSPARC MXCC stream-data and control registers. */
static void dump_mxcc(CPUState *env)
{
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n"
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
#endif
1622

    
1623
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
/* Debug trace of one ASI access, masking the value to the access size. */
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size)
    {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
#endif
1649

    
1650
#ifndef TARGET_SPARC64
#ifndef CONFIG_USER_ONLY

/* Leon3 cache control */
    
1656
static void leon3_cache_control_int(void)
1657
{
1658
    uint32_t state = 0;
1659

    
1660
    if (env->cache_control & CACHE_CTRL_IF) {
1661
        /* Instruction cache state */
1662
        state = env->cache_control & CACHE_STATE_MASK;
1663
        if (state == CACHE_ENABLED) {
1664
            state = CACHE_FROZEN;
1665
            DPRINTF_CACHE_CONTROL("Instruction cache: freeze\n");
1666
        }
1667

    
1668
        env->cache_control &= ~CACHE_STATE_MASK;
1669
        env->cache_control |= state;
1670
    }
1671

    
1672
    if (env->cache_control & CACHE_CTRL_DF) {
1673
        /* Data cache state */
1674
        state = (env->cache_control >> 2) & CACHE_STATE_MASK;
1675
        if (state == CACHE_ENABLED) {
1676
            state = CACHE_FROZEN;
1677
            DPRINTF_CACHE_CONTROL("Data cache: freeze\n");
1678
        }
1679

    
1680
        env->cache_control &= ~(CACHE_STATE_MASK << 2);
1681
        env->cache_control |= (state << 2);
1682
    }
1683
}
1684

    
1685
/* Store to the Leon3 cache-control register space. Only 32-bit stores
   are accepted; the configuration registers are read-only. */
static void leon3_cache_control_st(target_ulong addr, uint64_t val, int size)
{
    DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
                          addr, val, size);

    if (size != 4) {
        DPRINTF_CACHE_CONTROL("32bits only\n");
        return;
    }

    switch (addr) {
    case 0x00:              /* Cache control */

        /* These values must always be read as zeros */
        val &= ~CACHE_CTRL_FD;
        val &= ~CACHE_CTRL_FI;
        val &= ~CACHE_CTRL_IB;
        val &= ~CACHE_CTRL_IP;
        val &= ~CACHE_CTRL_DP;

        env->cache_control = val;
        break;
    case 0x04:              /* Instruction cache configuration */
    case 0x08:              /* Data cache configuration */
        /* Read Only */
        break;
    default:
        DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
        break;
    }
}
1716

    
1717
/* Load from the Leon3 cache-control register space. Only 32-bit loads
   are accepted; unknown addresses read as 0. */
static uint64_t leon3_cache_control_ld(target_ulong addr, int size)
{
    uint64_t ret = 0;

    if (size != 4) {
        DPRINTF_CACHE_CONTROL("32bits only\n");
        return 0;
    }

    switch (addr) {
    case 0x00:              /* Cache control */
        ret = env->cache_control;
        break;

        /* Configuration registers are read only and always keep those
           predefined values */

    case 0x04:              /* Instruction cache configuration */
        ret = 0x10220000;
        break;
    case 0x08:              /* Data cache configuration */
        ret = 0x18220000;
        break;
    default:
        DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
        break;
    }
    DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
                          addr, ret, size);
    return ret;
}
1748

    
1749
/* Acknowledge a Leon3 interrupt and apply the freeze-on-interrupt
   cache behaviour. */
void leon3_irq_manager(void *irq_manager, int intno)
{
    leon3_irq_ack(irq_manager, intno);
    leon3_cache_control_int();
}
1754

    
1755
/* Load via an alternate address space (sparc32). Dispatches on the ASI
   number; the returned value is sign-extended per 'size' when 'sign'
   is set. Unknown ASIs are reported via do_unassigned_access(). */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
    uint32_t last_addr = addr;
#endif

    helper_check_align(addr, size - 1);
    switch (asi) {
    case 2: /* SuperSparc MXCC registers and Leon3 cache control */
        switch (addr) {
        case 0x00:          /* Leon3 Cache Control */
        case 0x08:          /* Leon3 Instruction Cache config */
        case 0x0C:          /* Leon3 Data Cache config */
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
                ret = leon3_cache_control_ld(addr, size);
            }
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00c00: /* Module reset register */
            if (size == 8) {
                ret = env->mxccregs[5];
                // should we do something here?
            } else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                ret = env->mxccregs[7];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
                     "addr = %08x -> ret = %" PRIx64 ","
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case 3: /* MMU probe */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            if (mmulev > 4)
                ret = 0;
            else
                ret = mmu_probe(env, addr, mmulev);
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
                        addr, mmulev, ret);
        }
        break;
    case 4: /* read MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;

            ret = env->mmuregs[reg];
            if (reg == 3) /* Fault status cleared on read */
                env->mmuregs[3] = 0;
            else if (reg == 0x13) /* Fault status read */
                ret = env->mmuregs[3];
            else if (reg == 0x14) /* Fault address read */
                ret = env->mmuregs[4];
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
        }
        break;
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
        break;
    case 9: /* Supervisor code access */
        switch(size) {
        case 1:
            ret = ldub_code(addr);
            break;
        case 2:
            ret = lduw_code(addr);
            break;
        default:
        case 4:
            ret = ldl_code(addr);
            break;
        case 8:
            ret = ldq_code(addr);
            break;
        }
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            ret = ldub_user(addr);
            break;
        case 2:
            ret = lduw_user(addr);
            break;
        default:
        case 4:
            ret = ldl_user(addr);
            break;
        case 8:
            ret = ldq_user(addr);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            ret = ldub_kernel(addr);
            break;
        case 2:
            ret = lduw_kernel(addr);
            break;
        default:
        case 4:
            ret = ldl_kernel(addr);
            break;
        case 8:
            ret = ldq_kernel(addr);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
        break;
    case 0x20: /* MMU passthrough */
        switch(size) {
        case 1:
            ret = ldub_phys(addr);
            break;
        case 2:
            ret = lduw_phys(addr);
            break;
        default:
        case 4:
            ret = ldl_phys(addr);
            break;
        case 8:
            ret = ldq_phys(addr);
            break;
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        /* the low ASI nibble supplies physical address bits 35:32 */
        switch(size) {
        case 1:
            ret = ldub_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 2:
            ret = lduw_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        default:
        case 4:
            ret = ldl_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 8:
            ret = ldq_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        }
        break;
    case 0x30: // Turbosparc secondary cache diagnostic
    case 0x31: // Turbosparc RAM snoop
    case 0x32: // Turbosparc page table descriptor diagnostic
    case 0x39: /* data cache diagnostic register */
        ret = 0;
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        {
            int reg = (addr >> 8) & 3;

            switch(reg) {
            case 0: /* Breakpoint Value (Addr) */
                ret = env->mmubpregs[reg];
                break;
            case 1: /* Breakpoint Mask */
                ret = env->mmubpregs[reg];
                break;
            case 2: /* Breakpoint Control */
                ret = env->mmubpregs[reg];
                break;
            case 3: /* Breakpoint Status */
                ret = env->mmubpregs[reg];
                env->mmubpregs[reg] = 0ULL; /* status clears on read */
                break;
            }
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        ret);
        }
        break;
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
        ret = env->mmubpctrv;
        break;
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
        ret = env->mmubpctrc;
        break;
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
        ret = env->mmubpctrs;
        break;
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
        ret = env->mmubpaction;
        break;
    case 8: /* User code access, XXX */
    default:
        do_unassigned_access(addr, 0, 0, asi, size);
        ret = 0;
        break;
    }
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2006

    
2007
/* Store to an alternate address space (SPARC32, system emulation).
 *
 * addr: virtual address (interpretation depends on asi)
 * val:  value to store (up to 64 bits)
 * asi:  address-space identifier selecting the target device/space
 * size: access size in bytes (1, 2, 4 or 8)
 *
 * Raises an alignment trap via helper_check_align() for misaligned
 * accesses; unknown ASIs are routed to do_unassigned_access().
 */
void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
{
    helper_check_align(addr, size - 1);
    switch(asi) {
    case 2: /* SuperSparc MXCC registers and Leon3 cache control */
        switch (addr) {
        case 0x00:          /* Leon3 Cache Control */
        case 0x08:          /* Leon3 Instruction Cache config */
        case 0x0C:          /* Leon3 Data Cache config */
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
                leon3_cache_control_st(addr, val, size);
            }
            break;

        case 0x01c00000: /* MXCC stream data register 0 */
            if (size == 8)
                env->mxccdata[0] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00008: /* MXCC stream data register 1 */
            if (size == 8)
                env->mxccdata[1] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00010: /* MXCC stream data register 2 */
            if (size == 8)
                env->mxccdata[2] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00018: /* MXCC stream data register 3 */
            if (size == 8)
                env->mxccdata[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00100: /* MXCC stream source */
            if (size == 8)
                env->mxccregs[0] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            /* Writing the source register immediately fetches a 32-byte
               line from physical memory into the stream data registers. */
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        0);
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        8);
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        16);
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        24);
            break;
        case 0x01c00200: /* MXCC stream destination */
            if (size == 8)
                env->mxccregs[1] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            /* Writing the destination register flushes the stream data
               registers out to physical memory. */
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  0,
                     env->mxccdata[0]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  8,
                     env->mxccdata[1]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
                     env->mxccdata[2]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
                     env->mxccdata[3]);
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                env->mxccregs[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register (low word) */
            if (size == 4)
                env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
                    | val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00e00: /* MXCC error register  */
            /* writing a 1 bit clears the error */
            if (size == 8)
                env->mxccregs[6] &= ~val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                env->mxccregs[7] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
                     asi, size, addr, val);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case 3: /* MMU flush */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            DPRINTF_MMU("mmu flush level %d\n", mmulev);
            switch (mmulev) {
            case 0: /* flush page */
                tlb_flush_page(env, addr & 0xfffff000);
                break;
            case 1: /* flush segment (256k) */
            case 2: /* flush region (16M) */
            case 3: /* flush context (4G) */
            case 4: /* flush entire */
                /* QEMU has no finer-grained flush, so any non-page flush
                   conservatively drops the whole TLB. */
                tlb_flush(env, 1);
                break;
            default:
                break;
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
        }
        break;
    case 4: /* write MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;
            uint32_t oldreg;

            oldreg = env->mmuregs[reg];
            switch(reg) {
            case 0: /* Control Register */
                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
                                    (val & 0x00ffffff);
                /* Mappings generated during no-fault mode or MMU
                   disabled mode are invalid in normal mode */
                if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
                    (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
                    tlb_flush(env, 1);
                break;
            case 1: /* Context Table Pointer Register */
                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
                break;
            case 2: /* Context Register */
                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
                if (oldreg != env->mmuregs[reg]) {
                    /* we flush when the MMU context changes because
                       QEMU has no MMU context support */
                    tlb_flush(env, 1);
                }
                break;
            case 3: /* Synchronous Fault Status Register with Clear */
            case 4: /* Synchronous Fault Address Register */
                /* read-only through this alias; writes are ignored */
                break;
            case 0x10: /* TLB Replacement Control Register */
                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
                break;
            case 0x13: /* Synchronous Fault Status Register with Read
                          and Clear */
                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
                break;
            case 0x14: /* Synchronous Fault Address Register */
                env->mmuregs[4] = val;
                break;
            default:
                env->mmuregs[reg] = val;
                break;
            }
            if (oldreg != env->mmuregs[reg]) {
                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
                            reg, oldreg, env->mmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
        }
        break;
    case 5: /* Turbosparc ITLB Diagnostic */
    case 6: /* Turbosparc DTLB Diagnostic */
    case 7: /* Turbosparc IOTLB Diagnostic */
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            stb_user(addr, val);
            break;
        case 2:
            stw_user(addr, val);
            break;
        default:
        case 4:
            stl_user(addr, val);
            break;
        case 8:
            stq_user(addr, val);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            stb_kernel(addr, val);
            break;
        case 2:
            stw_kernel(addr, val);
            break;
        default:
        case 4:
            stl_kernel(addr, val);
            break;
        case 8:
            stq_kernel(addr, val);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
    case 0x10: /* I/D-cache flush page */
    case 0x11: /* I/D-cache flush segment */
    case 0x12: /* I/D-cache flush region */
    case 0x13: /* I/D-cache flush context */
    case 0x14: /* I/D-cache flush user */
        /* caches are not emulated, so flushes are no-ops */
        break;
    case 0x17: /* Block copy, sta access */
        {
            /* val = src
               addr = dst
               copy 32 bytes */
            unsigned int i;
            uint32_t src = val & ~3, dst = addr & ~3, temp;

            for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
                temp = ldl_kernel(src);
                stl_kernel(dst, temp);
            }
        }
        break;
    case 0x1f: /* Block fill, stda access */
        {
            /* addr = dst
               fill 32 bytes with val */
            unsigned int i;
            /* FIXME(review): 'addr & 7' keeps only the low 3 bits, so the
               fill always targets addresses 0..31; by symmetry with the
               '& ~3' alignment in the 0x17 block-copy case this looks like
               it should be 'addr & ~7'. Preserved as-is to avoid changing
               guest-visible behavior without hardware confirmation. */
            uint32_t dst = addr & 7;

            for (i = 0; i < 32; i += 8, dst += 8)
                stq_kernel(dst, val);
        }
        break;
    case 0x20: /* MMU passthrough */
        {
            switch(size) {
            case 1:
                stb_phys(addr, val);
                break;
            case 2:
                stw_phys(addr, val);
                break;
            case 4:
            default:
                stl_phys(addr, val);
                break;
            case 8:
                stq_phys(addr, val);
                break;
            }
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        {
            /* low nibble of the ASI supplies physical address bits 32..35 */
            switch(size) {
            case 1:
                stb_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 2:
                stw_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 4:
            default:
                stl_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 8:
                stq_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            }
        }
        break;
    case 0x30: /* store buffer tags or Turbosparc secondary cache
                  diagnostic */
    case 0x31: /* store buffer data, Ross RT620 I-cache flush or
                  Turbosparc snoop RAM */
    case 0x32: /* store buffer control or Turbosparc page table
                  descriptor diagnostic */
    case 0x36: /* I-cache flash clear */
    case 0x37: /* D-cache flash clear */
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
        {
            int reg = (addr >> 8) & 3;

            switch(reg) {
            case 0: /* Breakpoint Value (Addr) */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 1: /* Breakpoint Mask */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 2: /* Breakpoint Control */
                env->mmubpregs[reg] = (val & 0x7fULL);
                break;
            case 3: /* Breakpoint Status */
                env->mmubpregs[reg] = (val & 0xfULL);
                break;
            }
            /* Fixed: print the breakpoint register that was just written
               (mmubpregs, a uint64_t) instead of the unrelated mmuregs
               entry, using the matching 64-bit format specifier as the
               read path does. */
            DPRINTF_MMU("write breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        env->mmubpregs[reg]);
        }
        break;
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
        env->mmubpctrv = val & 0xffffffff;
        break;
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
        env->mmubpctrc = val & 0x3;
        break;
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
        env->mmubpctrs = val & 0x3;
        break;
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
        env->mmubpaction = val & 0x1fff;
        break;
    case 8: /* User code access, XXX */
    case 9: /* Supervisor code access, XXX */
    default:
        do_unassigned_access(addr, 1, 0, asi, size);
        break;
    }
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
}
2363

    
2364
#endif /* CONFIG_USER_ONLY */
2365
#else /* TARGET_SPARC64 */
2366

    
2367
#ifdef CONFIG_USER_ONLY
2368
/* Load from an alternate address space (SPARC64, user-mode emulation).
 *
 * addr: virtual address
 * asi:  address-space identifier; only the unprivileged ASIs (>= 0x80)
 *       are legal here — anything lower raises a privileged-action trap
 * size: access size in bytes (1, 2, 4 or 8)
 * sign: non-zero to sign-extend the loaded value
 *
 * No-fault ASIs silently return 0 when the page is not readable.
 * Little-endian ASIs byteswap the result after the load.
 */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    /* privileged ASIs are never accessible from user-mode emulation */
    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0x82: /* Primary no-fault */
    case 0x8a: /* Primary no-fault LE */
        /* no-fault: an unmapped/unreadable page yields 0 instead of a trap */
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        /* Fall through */
    case 0x80: /* Primary */
    case 0x88: /* Primary LE */
        {
            switch(size) {
            case 1:
                ret = ldub_raw(addr);
                break;
            case 2:
                ret = lduw_raw(addr);
                break;
            case 4:
                ret = ldl_raw(addr);
                break;
            default:
            case 8:
                ret = ldq_raw(addr);
                break;
            }
        }
        break;
    case 0x83: /* Secondary no-fault */
    case 0x8b: /* Secondary no-fault LE */
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        /* Fall through */
    case 0x81: /* Secondary */
    case 0x89: /* Secondary LE */
        /* XXX: secondary address space not implemented — returns 0 */
        break;
    default:
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x88: /* Primary LE */
    case 0x89: /* Secondary LE */
    case 0x8a: /* Primary no-fault LE */
    case 0x8b: /* Secondary no-fault LE */
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
        /* falls through into the empty default — harmless */
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2472

    
2473
/* Store to an alternate address space (SPARC64, user-mode emulation).
 *
 * addr: virtual address
 * asi:  address-space identifier; privileged ASIs (< 0x80) trap
 * size: access size in bytes (1, 2, 4 or 8)
 *
 * Little-endian ASIs byteswap the value before the store.  The
 * read-only no-fault ASIs and unknown ASIs report an unassigned
 * access.
 */
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    /* privileged ASIs are never accessible from user-mode emulation */
    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    /* Convert to little endian */
    switch (asi) {
    case 0x88: /* Primary LE */
    case 0x89: /* Secondary LE */
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
        /* falls through into the empty default — harmless */
    default:
        break;
    }

    switch(asi) {
    case 0x80: /* Primary */
    case 0x88: /* Primary LE */
        {
            switch(size) {
            case 1:
                stb_raw(addr, val);
                break;
            case 2:
                stw_raw(addr, val);
                break;
            case 4:
                stl_raw(addr, val);
                break;
            case 8:
            default:
                stq_raw(addr, val);
                break;
            }
        }
        break;
    case 0x81: /* Secondary */
    case 0x89: /* Secondary LE */
        /* XXX: secondary address space not implemented — store dropped */
        return;

    case 0x82: /* Primary no-fault, RO */
    case 0x83: /* Secondary no-fault, RO */
    case 0x8a: /* Primary no-fault LE, RO */
    case 0x8b: /* Secondary no-fault LE, RO */
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}
2540

    
2541
#else /* CONFIG_USER_ONLY */
2542

    
2543
/* Load from an alternate address space (SPARC64, system emulation).
 *
 * addr: virtual address (or physical address, for bypass ASIs)
 * asi:  address-space identifier (truncated to 8 bits); non-privileged
 *       code touching a privileged ASI, or non-hypervisor code touching
 *       an ASI in [0x30, 0x80) on a CPU with a hypervisor, raises a
 *       privileged-action trap
 * size: access size in bytes (1, 2, 4 or 8)
 * sign: non-zero to sign-extend the loaded value
 *
 * Dispatches to MMU-translated, physical (bypass), nucleus and MMU
 * register accesses; LE ASIs are byteswapped after the load.
 */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    asi &= 0xff;

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0x82: /* Primary no-fault */
    case 0x8a: /* Primary no-fault LE */
    case 0x83: /* Secondary no-fault */
    case 0x8b: /* Secondary no-fault LE */
        {
            /* secondary space access has lowest asi bit equal to 1 */
            /* NOTE(review): per the comment above, bit 0 set means
               *secondary*, yet bit 0 set selects MMU_KERNEL_IDX and
               bit 0 clear selects MMU_KERNEL_SECONDARY_IDX — this
               mapping looks inverted; verify against the MMU index
               definitions. */
            int access_mmu_idx = ( asi & 1 ) ? MMU_KERNEL_IDX
                                             : MMU_KERNEL_SECONDARY_IDX;

            /* no-fault: unmapped pages yield 0 instead of a trap */
            if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
#ifdef DEBUG_ASI
                dump_asi("read ", last_addr, asi, size, ret);
#endif
                return 0;
            }
        }
        /* Fall through */
    case 0x10: /* As if user primary */
    case 0x11: /* As if user secondary */
    case 0x18: /* As if user primary LE */
    case 0x19: /* As if user secondary LE */
    case 0x80: /* Primary */
    case 0x81: /* Secondary */
    case 0x88: /* Primary LE */
    case 0x89: /* Secondary LE */
    case 0xe2: /* UA2007 Primary block init */
    case 0xe3: /* UA2007 Secondary block init */
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if (cpu_hypervisor_mode(env)) {
                switch(size) {
                case 1:
                    ret = ldub_hypv(addr);
                    break;
                case 2:
                    ret = lduw_hypv(addr);
                    break;
                case 4:
                    ret = ldl_hypv(addr);
                    break;
                default:
                case 8:
                    ret = ldq_hypv(addr);
                    break;
                }
            } else {
                /* secondary space access has lowest asi bit equal to 1 */
                if (asi & 1) {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel_secondary(addr);
                        break;
                    case 2:
                        ret = lduw_kernel_secondary(addr);
                        break;
                    case 4:
                        ret = ldl_kernel_secondary(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel_secondary(addr);
                        break;
                    }
                } else {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel(addr);
                        break;
                    case 2:
                        ret = lduw_kernel(addr);
                        break;
                    case 4:
                        ret = ldl_kernel(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel(addr);
                        break;
                    }
                }
            }
        } else {
            /* user-mode access, or an "as if user" ASI (< 0x80) */
            /* secondary space access has lowest asi bit equal to 1 */
            if (asi & 1) {
                switch(size) {
                case 1:
                    ret = ldub_user_secondary(addr);
                    break;
                case 2:
                    ret = lduw_user_secondary(addr);
                    break;
                case 4:
                    ret = ldl_user_secondary(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user_secondary(addr);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    ret = ldub_user(addr);
                    break;
                case 2:
                    ret = lduw_user(addr);
                    break;
                case 4:
                    ret = ldl_user(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user(addr);
                    break;
                }
            }
        }
        break;
    case 0x14: /* Bypass */
    case 0x15: /* Bypass, non-cacheable */
    case 0x1c: /* Bypass LE */
    case 0x1d: /* Bypass, non-cacheable LE */
        {
            /* bypass ASIs read physical memory directly, no MMU */
            switch(size) {
            case 1:
                ret = ldub_phys(addr);
                break;
            case 2:
                ret = lduw_phys(addr);
                break;
            case 4:
                ret = ldl_phys(addr);
                break;
            default:
            case 8:
                ret = ldq_phys(addr);
                break;
            }
            break;
        }
    case 0x24: /* Nucleus quad LDD 128 bit atomic */
    case 0x2c: /* Nucleus quad LDD 128 bit atomic LE */
        /*  Only ldda allowed */
        raise_exception(TT_ILL_INSN);
        return 0;
    case 0x04: /* Nucleus */
    case 0x0c: /* Nucleus Little Endian (LE) */
    {
        switch(size) {
        case 1:
            ret = ldub_nucleus(addr);
            break;
        case 2:
            ret = lduw_nucleus(addr);
            break;
        case 4:
            ret = ldl_nucleus(addr);
            break;
        default:
        case 8:
            ret = ldq_nucleus(addr);
            break;
        }
        break;
    }
    case 0x4a: /* UPA config */
        /* XXX: not implemented — returns 0 */
        break;
    case 0x45: /* LSU */
        ret = env->lsu;
        break;
    case 0x50: /* I-MMU regs */
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                /* I-TSB Tag Target register */
                ret = ultrasparc_tag_target(env->immu.tag_access);
            } else {
                ret = env->immuregs[reg];
            }

            break;
        }
    case 0x51: /* I-MMU 8k TSB pointer */
        {
            /* env->immuregs[5] holds I-MMU TSB register value
               env->immuregs[6] holds I-MMU Tag Access register value */
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         8*1024);
            break;
        }
    case 0x52: /* I-MMU 64k TSB pointer */
        {
            /* env->immuregs[5] holds I-MMU TSB register value
               env->immuregs[6] holds I-MMU Tag Access register value */
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         64*1024);
            break;
        }
    case 0x55: /* I-MMU data access */
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tte;
            break;
        }
    case 0x56: /* I-MMU tag read */
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tag;
            break;
        }
    case 0x58: /* D-MMU regs */
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                /* D-TSB Tag Target register */
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
            } else {
                ret = env->dmmuregs[reg];
            }
            break;
        }
    case 0x59: /* D-MMU 8k TSB pointer */
        {
            /* env->dmmuregs[5] holds D-MMU TSB register value
               env->dmmuregs[6] holds D-MMU Tag Access register value */
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         8*1024);
            break;
        }
    case 0x5a: /* D-MMU 64k TSB pointer */
        {
            /* env->dmmuregs[5] holds D-MMU TSB register value
               env->dmmuregs[6] holds D-MMU Tag Access register value */
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         64*1024);
            break;
        }
    case 0x5d: /* D-MMU data access */
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tte;
            break;
        }
    case 0x5e: /* D-MMU tag read */
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tag;
            break;
        }
    case 0x46: /* D-cache data */
    case 0x47: /* D-cache tag access */
    case 0x4b: /* E-cache error enable */
    case 0x4c: /* E-cache asynchronous fault status */
    case 0x4d: /* E-cache asynchronous fault address */
    case 0x4e: /* E-cache tag data */
    case 0x66: /* I-cache instruction access */
    case 0x67: /* I-cache tag access */
    case 0x6e: /* I-cache predecode */
    case 0x6f: /* I-cache LRU etc. */
    case 0x76: /* E-cache tag */
    case 0x7e: /* E-cache tag */
        /* caches are not emulated — reads return 0 */
        break;
    case 0x5b: /* D-MMU data pointer */
    case 0x48: /* Interrupt dispatch, RO */
    case 0x49: /* Interrupt data receive */
    case 0x7f: /* Incoming interrupt vector, RO */
        /* XXX: not implemented — returns 0 */
        break;
    case 0x54: /* I-MMU data in, WO */
    case 0x57: /* I-MMU demap, WO */
    case 0x5c: /* D-MMU data in, WO */
    case 0x5f: /* D-MMU demap, WO */
    case 0x77: /* Interrupt vector, WO */
    default:
        do_unassigned_access(addr, 0, 0, 1, size);
        ret = 0;
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x0c: /* Nucleus Little Endian (LE) */
    case 0x18: /* As if user primary LE */
    case 0x19: /* As if user secondary LE */
    case 0x1c: /* Bypass LE */
    case 0x1d: /* Bypass, non-cacheable LE */
    case 0x88: /* Primary LE */
    case 0x89: /* Secondary LE */
    case 0x8a: /* Primary no-fault LE */
    case 0x8b: /* Secondary no-fault LE */
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
        /* falls through into the empty default — harmless */
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2896

    
2897
/* Store to an alternate address space (SPARC V9 STxA).
 *
 * addr: virtual (or, for bypass ASIs, physical) address
 * val:  value to store
 * asi:  address space identifier; only the low 8 bits are used
 * size: access size in bytes (1, 2, 4 or 8)
 *
 * Raises TT_PRIV_ACT when a privileged ASI is used without the required
 * privilege level, TT_ILL_INSN for the quad-atomic ASIs (only LDDA may
 * use those), and routes stores to read-only or unassigned ASIs to
 * do_unassigned_access().
 */
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif

    asi &= 0xff;

    /* ASIs < 0x80 are privileged; with a hypervisor present, ASIs in
       0x30..0x7f additionally require hyperprivileged mode. */
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    /* Convert to little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
    default:
        break;
    }

    switch(asi) {
    case 0x10: // As if user primary
    case 0x11: // As if user secondary
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x80: // Primary
    case 0x81: // Secondary
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if (cpu_hypervisor_mode(env)) {
                switch(size) {
                case 1:
                    stb_hypv(addr, val);
                    break;
                case 2:
                    stw_hypv(addr, val);
                    break;
                case 4:
                    stl_hypv(addr, val);
                    break;
                case 8:
                default:
                    stq_hypv(addr, val);
                    break;
                }
            } else {
                /* secondary space access has lowest asi bit equal to 1 */
                if (asi & 1) {
                    switch(size) {
                    case 1:
                        stb_kernel_secondary(addr, val);
                        break;
                    case 2:
                        stw_kernel_secondary(addr, val);
                        break;
                    case 4:
                        stl_kernel_secondary(addr, val);
                        break;
                    case 8:
                    default:
                        stq_kernel_secondary(addr, val);
                        break;
                    }
                } else {
                    switch(size) {
                    case 1:
                        stb_kernel(addr, val);
                        break;
                    case 2:
                        stw_kernel(addr, val);
                        break;
                    case 4:
                        stl_kernel(addr, val);
                        break;
                    case 8:
                    default:
                        stq_kernel(addr, val);
                        break;
                    }
                }
            }
        } else {
            /* secondary space access has lowest asi bit equal to 1 */
            if (asi & 1) {
                switch(size) {
                case 1:
                    stb_user_secondary(addr, val);
                    break;
                case 2:
                    stw_user_secondary(addr, val);
                    break;
                case 4:
                    stl_user_secondary(addr, val);
                    break;
                case 8:
                default:
                    stq_user_secondary(addr, val);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    stb_user(addr, val);
                    break;
                case 2:
                    stw_user(addr, val);
                    break;
                case 4:
                    stl_user(addr, val);
                    break;
                case 8:
                default:
                    stq_user(addr, val);
                    break;
                }
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        {
            switch(size) {
            case 1:
                stb_phys(addr, val);
                break;
            case 2:
                stw_phys(addr, val);
                break;
            case 4:
                stl_phys(addr, val);
                break;
            case 8:
            default:
                stq_phys(addr, val);
                break;
            }
        }
        return;
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        //  Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    {
        switch(size) {
        case 1:
            stb_nucleus(addr, val);
            break;
        case 2:
            stw_nucleus(addr, val);
            break;
        case 4:
            stl_nucleus(addr, val);
            break;
        default:
        case 8:
            stq_nucleus(addr, val);
            break;
        }
        break;
    }

    case 0x4a: // UPA config
        // XXX
        return;
    case 0x45: // LSU
        {
            uint64_t oldreg;

            oldreg = env->lsu;
            env->lsu = val & (DMMU_E | IMMU_E);
            // Mappings generated during D/I MMU disabled mode are
            // invalid in normal mode
            if (oldreg != env->lsu) {
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
                            oldreg, env->lsu);
#ifdef DEBUG_MMU
                /* Fixed: was dump_mmu(stdout, fprintf, env1) -- env1 is not
                   in scope here, breaking the DEBUG_MMU build. */
                dump_mmu(stdout, fprintf, env);
#endif
                tlb_flush(env, 1);
            }
            return;
        }
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->immuregs[reg];
            switch(reg) {
            case 0: // RO
                return;
            case 1: // Not in I-MMU
            case 2:
                return;
            case 3: // SFSR
                if ((val & 1) == 0)
                    val = 0; // Clear SFSR
                env->immu.sfsr = val;
                break;
            case 4: // RO
                return;
            case 5: // TSB access
                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->immu.tsb, val);
                env->immu.tsb = val;
                break;
            case 6: // Tag access
                env->immu.tag_access = val;
                break;
            case 7:
            case 8:
                return;
            default:
                break;
            }

            if (oldreg != env->immuregs[reg]) {
                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case 0x54: // I-MMU data in
        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
        return;
    case 0x55: // I-MMU data access
        {
            // TODO: auto demap

            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case 0x57: // I-MMU demap
        demap_tlb(env->itlb, addr, "immu", env);
        return;
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->dmmuregs[reg];
            switch(reg) {
            case 0: // RO
            case 4:
                return;
            case 3: // SFSR
                if ((val & 1) == 0) {
                    val = 0; // Clear SFSR, Fault address
                    env->dmmu.sfar = 0;
                }
                env->dmmu.sfsr = val;
                break;
            case 1: // Primary context
                env->dmmu.mmu_primary_context = val;
                /* can be optimized to only flush MMU_USER_IDX
                   and MMU_KERNEL_IDX entries */
                tlb_flush(env, 1);
                break;
            case 2: // Secondary context
                env->dmmu.mmu_secondary_context = val;
                /* can be optimized to only flush MMU_USER_SECONDARY_IDX
                   and MMU_KERNEL_SECONDARY_IDX entries */
                tlb_flush(env, 1);
                break;
            case 5: // TSB access
                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->dmmu.tsb, val);
                env->dmmu.tsb = val;
                break;
            case 6: // Tag access
                env->dmmu.tag_access = val;
                break;
            case 7: // Virtual Watchpoint
            case 8: // Physical Watchpoint
            default:
                env->dmmuregs[reg] = val;
                break;
            }

            if (oldreg != env->dmmuregs[reg]) {
                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case 0x5c: // D-MMU data in
        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
        return;
    case 0x5d: // D-MMU data access
        {
            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case 0x5f: // D-MMU demap
        demap_tlb(env->dtlb, addr, "dmmu", env);
        return;
    case 0x49: // Interrupt data receive
        // XXX
        return;
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        return;
    case 0x51: // I-MMU 8k TSB pointer, RO
    case 0x52: // I-MMU 64k TSB pointer, RO
    case 0x56: // I-MMU tag read, RO
    case 0x59: // D-MMU 8k TSB pointer, RO
    case 0x5a: // D-MMU 64k TSB pointer, RO
    case 0x5b: // D-MMU data pointer, RO
    case 0x5e: // D-MMU tag read, RO
    case 0x48: // Interrupt dispatch, RO
    case 0x7f: // Incoming interrupt vector, RO
    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}
#endif /* CONFIG_USER_ONLY */

/* V9 LDDA: load a doubleword pair from an alternate address space into the
 * register pair rd/rd+1.  ASIs 0x24/0x2c perform the 128-bit atomic quad
 * load (byte-swapped for the LE variant 0x2c); any other ASI is split into
 * two 32-bit helper_ld_asi() accesses.  Privileged ASIs used without the
 * required privilege raise TT_PRIV_ACT.
 */
void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
#if !defined(CONFIG_USER_ONLY)
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        helper_check_align(addr, 0xf);
        if (rd == 0) {
            /* rd is %g0: the even half is discarded, only %g1 is written */
            env->gregs[1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_nucleus(addr);
            env->gregs[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            /* rd >= 8: windowed register, accessed via regwptr */
            env->regwptr[rd] = ldq_nucleus(addr);
            env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
#endif
    default:
        helper_check_align(addr, 0x3);
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}

/* Load into the FP register file from an alternate address space.
 * Block-load ASIs transfer 64 bytes (16 single-precision words) into an
 * 8-register-aligned group starting at rd; other ASIs load a single value
 * of the given size (4, 8 or 16 bytes) via helper_ld_asi().
 */
void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    CPU_DoubleU u;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xf0: // Block load primary
    case 0xf1: // Block load secondary
    case 0xf8: // Block load primary LE
    case 0xf9: // Block load secondary LE
        /* destination must be an 8-register-aligned group */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* delegate each word; asi & 0x8f drops the block-transfer bits */
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    case 0x16: /* UA2007 Block load primary, user privilege */
    case 0x17: /* UA2007 Block load secondary, user privilege */
    case 0x1e: /* UA2007 Block load primary LE, user privilege */
    case 0x1f: /* UA2007 Block load secondary LE, user privilege */
    case 0x70: // Block load primary, user privilege
    case 0x71: // Block load secondary, user privilege
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x19 maps to the corresponding as-if-user ASI */
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x19, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = helper_ld_asi(addr, asi, size, 0);
        break;
    case 8:
        u.ll = helper_ld_asi(addr, asi, size, 0);
        *((uint32_t *)&env->fpr[rd++]) = u.l.upper;
        *((uint32_t *)&env->fpr[rd++]) = u.l.lower;
        break;
    case 16:
        /* quad: two 64-bit loads spread across four FP words */
        u.ll = helper_ld_asi(addr, asi, 8, 0);
        *((uint32_t *)&env->fpr[rd++]) = u.l.upper;
        *((uint32_t *)&env->fpr[rd++]) = u.l.lower;
        u.ll = helper_ld_asi(addr + 8, asi, 8, 0);
        *((uint32_t *)&env->fpr[rd++]) = u.l.upper;
        *((uint32_t *)&env->fpr[rd++]) = u.l.lower;
        break;
    }
}

/* Store from the FP register file to an alternate address space.
 * Block-store ASIs transfer 64 bytes (16 single-precision words) from an
 * 8-register-aligned group starting at rd; other ASIs store a single value
 * of the given size (4, 8 or 16 bytes) via helper_st_asi().
 */
void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;
    CPU_DoubleU u;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xe0: // UA2007 Block commit store primary (cache flush)
    case 0xe1: // UA2007 Block commit store secondary (cache flush)
    case 0xf0: // Block store primary
    case 0xf1: // Block store secondary
    case 0xf8: // Block store primary LE
    case 0xf9: // Block store secondary LE
        /* source must be an 8-register-aligned group */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* delegate each word; asi & 0x8f drops the block-transfer bits */
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    case 0x16: /* UA2007 Block store primary, user privilege */
    case 0x17: /* UA2007 Block store secondary, user privilege */
    case 0x1e: /* UA2007 Block store primary LE, user privilege */
    case 0x1f: /* UA2007 Block store secondary LE, user privilege */
    case 0x70: // Block store primary, user privilege
    case 0x71: // Block store secondary, user privilege
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x19 maps to the corresponding as-if-user ASI */
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x19, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        helper_st_asi(addr, *(uint32_t *)&env->fpr[rd], asi, size);
        break;
    case 8:
        u.l.upper = *(uint32_t *)&env->fpr[rd++];
        u.l.lower = *(uint32_t *)&env->fpr[rd++];
        helper_st_asi(addr, u.ll, asi, size);
        break;
    case 16:
        /* quad: four FP words become two 64-bit stores */
        u.l.upper = *(uint32_t *)&env->fpr[rd++];
        u.l.lower = *(uint32_t *)&env->fpr[rd++];
        helper_st_asi(addr, u.ll, asi, 8);
        u.l.upper = *(uint32_t *)&env->fpr[rd++];
        u.l.lower = *(uint32_t *)&env->fpr[rd++];
        helper_st_asi(addr + 8, u.ll, asi, 8);
        break;
    }
}

/* 32-bit compare-and-swap in an alternate address space (CASA):
 * read the word at addr; if it equals the low 32 bits of val2, store the
 * low 32 bits of val1 there.  Returns the (masked) value that was read. */
target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
                            target_ulong val2, uint32_t asi)
{
    target_ulong mem;

    mem = helper_ld_asi(addr, asi, 4, 0) & 0xffffffffUL;
    if (mem == (val2 & 0xffffffffUL)) {
        helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
    }
    return mem;
}

/* 64-bit compare-and-swap in an alternate address space (CASXA):
 * read the doubleword at addr; if it equals val2, store val1 there.
 * Returns the value that was read. */
target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
                             target_ulong val2, uint32_t asi)
{
    target_ulong mem = helper_ld_asi(addr, asi, 8, 0);

    if (mem == val2) {
        helper_st_asi(addr, val1, asi, 8);
    }
    return mem;
}
#endif /* TARGET_SPARC64 */

#ifndef TARGET_SPARC64
/* V8 RETT (return from trap).
 * Illegal if traps are already enabled (psret == 1).  Otherwise traps are
 * re-enabled, the window pointer is rotated back (CWP + 1) -- raising a
 * window-underflow trap if that window is marked invalid in WIM -- and the
 * supervisor bit is restored from the saved PS bit.  Note psret is set
 * before the WIM check, so a TT_WIN_UNF trap is taken with traps enabled. */
void helper_rett(void)
{
    unsigned int cwp;

    if (env->psret == 1)
        raise_exception(TT_ILL_INSN);

    env->psret = 1;
    cwp = cwp_inc(env->cwp + 1) ;
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
    env->psrs = env->psrps;
}
#endif

/* Common body of UDIV/UDIVcc: divide the 64-bit value Y:a (Y register in
 * the high word) by the low 32 bits of b, unsigned.  A zero divisor raises
 * TT_DIV_ZERO; a quotient that does not fit in 32 bits saturates to
 * 0xffffffff and flags overflow.  When cc is set, the condition-code
 * machinery is primed for CC_OP_DIV. */
static target_ulong helper_udiv_common(target_ulong a, target_ulong b, int cc)
{
    uint64_t dividend;
    uint32_t divisor;
    int overflow = 0;

    dividend = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    divisor = (b & 0xffffffff);

    if (divisor == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    dividend = dividend / divisor;

    /* saturate on 32-bit overflow */
    if (dividend > 0xffffffff) {
        dividend = 0xffffffff;
        overflow = 1;
    }

    if (cc) {
        env->cc_dst = dividend;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return dividend;
}

/* UDIV: unsigned divide without updating the condition codes. */
target_ulong helper_udiv(target_ulong a, target_ulong b)
{
    return helper_udiv_common(a, b, 0);
}

/* UDIVcc: unsigned divide, updating the condition codes. */
target_ulong helper_udiv_cc(target_ulong a, target_ulong b)
{
    return helper_udiv_common(a, b, 1);
}

/* Common body of SDIV/SDIVcc: divide the 64-bit value Y:a (Y register in
 * the high word) by the low 32 bits of b, signed.  A zero divisor raises
 * TT_DIV_ZERO; a quotient outside the int32 range saturates to
 * 0x80000000/0x7fffffff and flags overflow.  When cc is set, the
 * condition-code machinery is primed for CC_OP_DIV. */
static target_ulong helper_sdiv_common(target_ulong a, target_ulong b, int cc)
{
    int overflow = 0;
    int64_t x0;
    int32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = (b & 0xffffffff);

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    if (x1 == -1 && (uint64_t)x0 == (1ULL << 63)) {
        /* INT64_MIN / -1 is undefined behavior in C (and traps on x86).
           The true quotient 2^63 is positive and far out of int32 range,
           so saturate exactly as the generic overflow path below would. */
        x0 = 0x7fffffff;
        overflow = 1;
    } else {
        x0 = x0 / x1;
        if ((int32_t) x0 != x0) {
            /* quotient does not fit in 32 bits: saturate by sign */
            x0 = x0 < 0 ? 0x80000000: 0x7fffffff;
            overflow = 1;
        }
    }

    if (cc) {
        env->cc_dst = x0;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return x0;
}

/* SDIV: signed divide without updating the condition codes. */
target_ulong helper_sdiv(target_ulong a, target_ulong b)
{
    return helper_sdiv_common(a, b, 0);
}

/* SDIVcc: signed divide, updating the condition codes. */
target_ulong helper_sdiv_cc(target_ulong a, target_ulong b)
{
    return helper_sdiv_common(a, b, 1);
}

/* Store the double-precision FP temporary DT0 to memory, dispatching on
 * the MMU index (user/kernel/hypervisor) in system mode, or through the
 * address-masked raw path in user mode.  Requires 8-byte alignment. */
void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        stfq_user(addr, DT0);
        break;
    case MMU_KERNEL_IDX:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        /* unexpected MMU index: store is silently dropped */
        DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    stfq_raw(address_mask(env, addr), DT0);
#endif
}

/* Load a double-precision FP value from memory into the temporary DT0,
 * dispatching on the MMU index (user/kernel/hypervisor) in system mode,
 * or through the address-masked raw path in user mode.  Requires 8-byte
 * alignment. */
void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        DT0 = ldfq_user(addr);
        break;
    case MMU_KERNEL_IDX:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        /* unexpected MMU index: DT0 is left unchanged */
        DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    DT0 = ldfq_raw(address_mask(env, addr));
#endif
}

/* Load a quad-precision (128-bit) FP value into the temporary QT0 as two
 * 64-bit halves (upper at addr, lower at addr + 8), dispatching on the MMU
 * index in system mode.  Only 8-byte alignment is enforced. */
void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case MMU_KERNEL_IDX:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        /* unexpected MMU index: QT0 is left unchanged */
        DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.ll.upper = ldq_raw(address_mask(env, addr));
    u.ll.lower = ldq_raw(address_mask(env, addr + 8));
    QT0 = u.q;
#endif
}

/* Store the quad-precision (128-bit) FP temporary QT0 to memory as two
 * 64-bit halves (upper at addr, lower at addr + 8), dispatching on the MMU
 * index in system mode.  Only 8-byte alignment is enforced. */
void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case MMU_KERNEL_IDX:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        /* unexpected MMU index: store is silently dropped */
        DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.q = QT0;
    stq_raw(address_mask(env, addr), u.ll.upper);
    stq_raw(address_mask(env, addr + 8), u.ll.lower);
#endif
}

static inline void set_fsr(void)
3710
{
3711
    int rnd_mode;
3712

    
3713
    switch (env->fsr & FSR_RD_MASK) {
3714
    case FSR_RD_NEAREST:
3715
        rnd_mode = float_round_nearest_even;
3716
        break;
3717
    default:
3718
    case FSR_RD_ZERO:
3719
        rnd_mode = float_round_to_zero;
3720
        break;
3721
    case FSR_RD_POS:
3722
        rnd_mode = float_round_up;
3723
        break;
3724
    case FSR_RD_NEG:
3725
        rnd_mode = float_round_down;
3726
        break;
3727
    }
3728
    set_float_rounding_mode(rnd_mode, &env->fp_status);
3729
}
3730

    
3731
/* LDFSR: replace the LDFSR-writable FSR bits with new_fsr, keep the
   remaining bits, then resynchronize the softfloat rounding mode. */
void helper_ldfsr(uint32_t new_fsr)
{
    env->fsr &= FSR_LDFSR_OLDMASK;
    env->fsr |= new_fsr & FSR_LDFSR_MASK;
    set_fsr();
}

#ifdef TARGET_SPARC64
/* LDXFSR (V9): replace the LDXFSR-writable FSR bits with new_fsr, keep
   the remaining bits, then resynchronize the softfloat rounding mode. */
void helper_ldxfsr(uint64_t new_fsr)
{
    env->fsr &= FSR_LDXFSR_OLDMASK;
    env->fsr |= new_fsr & FSR_LDXFSR_MASK;
    set_fsr();
}
#endif

/* Leave the CPU execution loop with EXCP_DEBUG so the debugger stub can
   take over.  Does not return. */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}

#ifndef TARGET_SPARC64
3752
/* XXX: use another pointer for %iN registers to avoid slow wrapping
3753
   handling ? */
3754
void helper_save(void)
3755
{
3756
    uint32_t cwp;
3757

    
3758
    cwp = cwp_dec(env->cwp - 1);
3759
    if (env->wim & (1 << cwp)) {
3760
        raise_exception(TT_WIN_OVF);
3761
    }
3762
    set_cwp(cwp);
3763
}
3764

    
3765
void helper_restore(void)
3766
{
3767
    uint32_t cwp;
3768

    
3769
    cwp = cwp_inc(env->cwp + 1);
3770
    if (env->wim & (1 << cwp)) {
3771
        raise_exception(TT_WIN_UNF);
3772
    }
3773
    set_cwp(cwp);
3774
}
3775

    
3776
/* WRPSR: install a new PSR value, rejecting any CWP field that points
   outside the implemented register windows. */
void helper_wrpsr(target_ulong new_psr)
{
    if ((new_psr & PSR_CWP) < env->nwindows) {
        cpu_put_psr(env, new_psr);
    } else {
        raise_exception(TT_ILL_INSN);
    }
}

/* RDPSR: return the current PSR assembled from the split CPU state. */
target_ulong helper_rdpsr(void)
{
    return get_psr();
}

#else
3791
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
/* V9 SAVE: rotate to the previous window.  With no savable window left
 * (CANSAVE == 0) a spill trap is raised; the trap type carries TT_WOTHER
 * plus a WSTATE-derived subtype when OTHERWIN windows belong to another
 * address space.  With no clean window available, a clean-window trap is
 * raised instead.  Otherwise CANSAVE/CANRESTORE are rebalanced and the
 * window pointer moves. */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cwp_dec(env->cwp - 1);
    if (env->cansave == 0) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            // XXX Clean windows without trap
            raise_exception(TT_CLRWIN);
        } else {
            env->cansave--;
            env->canrestore++;
            set_cwp(cwp);
        }
    }
}

/* V9 RESTORE: rotate to the next window.  With no restorable window left
 * (CANRESTORE == 0) a fill trap is raised, with the same TT_WOTHER /
 * WSTATE subtype encoding as helper_save().  Otherwise CANSAVE/CANRESTORE
 * are rebalanced and the window pointer moves. */
void helper_restore(void)
{
    uint32_t cwp;

    cwp = cwp_inc(env->cwp + 1);
    if (env->canrestore == 0) {
        raise_exception(TT_FILL | (env->otherwin != 0 ?
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                   ((env->wstate & 0x7) << 2)));
    } else {
        env->cansave++;
        env->canrestore--;
        set_cwp(cwp);
    }
}

/* V9 FLUSHW: if any window other than the current one is still occupied
 * (CANSAVE != NWINDOWS - 2), raise a spill trap so software flushes the
 * occupied windows; the trap subtype encoding matches helper_save(). */
void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}

void helper_saved(void)
3840
{
3841
    env->cansave++;
3842
    if (env->otherwin == 0)
3843
        env->canrestore--;
3844
    else
3845
        env->otherwin--;
3846
}
3847

    
3848
void helper_restored(void)
3849
{
3850
    env->canrestore++;
3851
    if (env->cleanwin < env->nwindows - 1)
3852
        env->cleanwin++;
3853
    if (env->otherwin == 0)
3854
        env->cansave--;
3855
    else
3856
        env->otherwin--;
3857
}
3858

    
3859
static target_ulong get_ccr(void)
3860
{
3861
    target_ulong psr;
3862

    
3863
    psr = get_psr();
3864

    
3865
    return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
3866
}
3867

    
3868
/* External entry point for get_ccr(): temporarily point the global env
   at env1, read the CCR, then restore the previous env. */
target_ulong cpu_get_ccr(CPUState *env1)
{
    CPUState *saved_env;
    target_ulong ret;

    saved_env = env;
    env = env1;
    ret = get_ccr();
    env = saved_env;
    return ret;
}

/* Install a V9 CCR value: bits 7..4 become the xcc flags, bits 3..0 the
   icc flags (both stored shifted into PSR position), and the lazy
   condition-code evaluation is switched off. */
static void put_ccr(target_ulong val)
{
    env->xcc = (val >> 4) << 20;
    env->psr = (val & 0xf) << 20;
    CC_OP = CC_OP_FLAGS;
}

/* External entry point for put_ccr(): swap env1 in for the duration
   of the call. */
void cpu_put_ccr(CPUState *env1, target_ulong val)
{
    CPUState *prev = env;

    env = env1;
    put_ccr(val);
    env = prev;
}
3898

    
3899
/* V9 numbers windows in the reverse order of V8; convert our internal
   (V8-order) CWP to the architectural V9 value. */
static target_ulong get_cwp64(void)
{
    return env->nwindows - 1 - env->cwp;
}
3903

    
3904
/* External entry point for get_cwp64() with an explicit CPU state. */
target_ulong cpu_get_cwp64(CPUState *env1)
{
    CPUState *prev = env;
    target_ulong cwp;

    env = env1;
    cwp = get_cwp64();
    env = prev;
    return cwp;
}
3915

    
3916
static void put_cwp64(int cwp)
3917
{
3918
    if (unlikely(cwp >= env->nwindows || cwp < 0)) {
3919
        cwp %= env->nwindows;
3920
    }
3921
    set_cwp(env->nwindows - 1 - cwp);
3922
}
3923

    
3924
/* External entry point for put_cwp64() with an explicit CPU state. */
void cpu_put_cwp64(CPUState *env1, int cwp)
{
    CPUState *prev = env;

    env = env1;
    put_cwp64(cwp);
    env = prev;
}
3933

    
3934
/* RDCCR: read the condition-code register. */
target_ulong helper_rdccr(void)
{
    return get_ccr();
}
3938

    
3939
/* WRCCR: write the condition-code register. */
void helper_wrccr(target_ulong new_ccr)
{
    put_ccr(new_ccr);
}
3943

    
3944
// CWP handling is reversed in V9, but we still use the V8 register
// order internally; get_cwp64() performs the conversion.
target_ulong helper_rdcwp(void)
{
    return get_cwp64();
}
3950

    
3951
/* WRCWP: write the (V9-order) current window pointer. */
void helper_wrcwp(target_ulong new_cwp)
{
    put_cwp64(new_cwp);
}
3955

    
3956
// This function uses non-native bit order: FROM/TO count from the
// most-significant bit (bit 0 == 2^63).
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))

/* VIS ARRAY8: interleave the coordinate bit fields packed in
   pixel_addr into a blocked memory address; cubesize scales the
   placement of the upper fields.  NOTE(review): field positions are
   taken as-is — verify against the UltraSPARC VIS manual if touched. */
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}
3976

    
3977
/* ALIGNADDRESS: compute addr + offset, record the low three bits of
   the sum in GSR.align, and return the sum rounded down to an 8-byte
   boundary. */
target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
{
    uint64_t sum = addr + offset;

    env->gsr = (env->gsr & ~7ULL) | (sum & 7ULL);
    return sum & ~7ULL;
}
3986

    
3987
/* POPC: population count (number of set bits) of val. */
target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}
3991

    
3992
/* Return the global-register backing store selected by the given
   PSTATE AG/MG/IG bits.  Exactly one bit is expected; any other
   combination is logged and deliberately falls through to the normal
   (bgregs) set. */
static inline uint64_t *get_gregset(uint32_t pstate)
{
    switch (pstate) {
    default:
        DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
                pstate,
                (pstate & PS_IG) ? " IG" : "",
                (pstate & PS_MG) ? " MG" : "",
                (pstate & PS_AG) ? " AG" : "");
        /* pass through to normal set of global registers */
        /* fallthrough */
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}
4012

    
4013
/* Install a new PSTATE value.  If the active global-register bank
   (AG/MG/IG selection) changes, swap env->gregs with the newly
   selected shadow set: current globals are saved into the old set's
   backing store, then the new set is copied in.  Order matters. */
static inline void change_pstate(uint32_t new_pstate)
{
    uint32_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    if (env->def->features & CPU_FEATURE_GL) {
        // PS_AG is not implemented in this case
        new_pstate &= ~PS_AG;
    }

    /* 0xc01 masks the register-bank selector bits (PS_AG/PS_MG/PS_IG
       — presumably; confirm against the PS_* definitions). */
    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;

    if (new_pstate_regs != pstate_regs) {
        DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
                       pstate_regs, new_pstate_regs);
        // Switch global register bank
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    }
    else {
        DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
                       new_pstate_regs);
    }
    env->pstate = new_pstate;
}
4041

    
4042
/* WRPSTATE: write PSTATE (masked to the implemented bits) and then
   re-evaluate pending interrupts, which the write may have unmasked. */
void helper_wrpstate(target_ulong new_state)
{
    change_pstate(new_state & 0xf3f);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4052

    
4053
/* External entry point for change_pstate() with an explicit CPU
   state: swap env1 in for the duration of the call. */
void cpu_change_pstate(CPUState *env1, uint32_t new_pstate)
{
    CPUState *prev = env;

    env = env1;
    change_pstate(new_pstate);
    env = prev;
}
4062

    
4063
/* WRPIL: write the processor interrupt level, then re-check for
   interrupts that the new (possibly lower) level lets through.
   No-op in user-only mode. */
void helper_wrpil(target_ulong new_pil)
{
#if !defined(CONFIG_USER_ONLY)
    DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
                   env->psrpil, (uint32_t)new_pil);

    env->psrpil = new_pil;

    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4076

    
4077
/* DONE: return from a trap handler, resuming *after* the trapped
   instruction (PC <- TNPC, NPC <- TNPC + 4).  CCR, ASI, PSTATE and
   CWP are restored from the packed TSTATE fields, then TL is popped. */
void helper_done(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tnpc;
    env->npc = tsptr->tnpc + 4;
    put_ccr(tsptr->tstate >> 32);              /* TSTATE.ccr */
    env->asi = (tsptr->tstate >> 24) & 0xff;   /* TSTATE.asi */
    change_pstate((tsptr->tstate >> 8) & 0xf3f); /* TSTATE.pstate */
    put_cwp64(tsptr->tstate & 0xff);           /* TSTATE.cwp */
    env->tl--;

    DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    /* The restored PSTATE may have re-enabled interrupts. */
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4097

    
4098
/* RETRY: return from a trap handler, *re-executing* the trapped
   instruction (PC <- TPC, NPC <- TNPC).  CCR, ASI, PSTATE and CWP are
   restored from the packed TSTATE fields, then TL is popped. */
void helper_retry(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tpc;
    env->npc = tsptr->tnpc;
    put_ccr(tsptr->tstate >> 32);              /* TSTATE.ccr */
    env->asi = (tsptr->tstate >> 24) & 0xff;   /* TSTATE.asi */
    change_pstate((tsptr->tstate >> 8) & 0xf3f); /* TSTATE.pstate */
    put_cwp64(tsptr->tstate & 0xff);           /* TSTATE.cwp */
    env->tl--;

    DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    /* The restored PSTATE may have re-enabled interrupts. */
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4118

    
4119
/* Set SOFTINT to 'value' if it actually changed and re-evaluate
   pending interrupts; 'operation' names the caller for debug tracing
   only. */
static void do_modify_softint(const char* operation, uint32_t value)
{
    if (env->softint != value) {
        env->softint = value;
        DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
#if !defined(CONFIG_USER_ONLY)
        if (cpu_interrupts_enabled(env)) {
            cpu_check_irqs(env);
        }
#endif
    }
}
4131

    
4132
/* SET_SOFTINT: OR the given bits into SOFTINT. */
void helper_set_softint(uint64_t value)
{
    do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
}
4136

    
4137
/* CLEAR_SOFTINT: clear the given bits in SOFTINT. */
void helper_clear_softint(uint64_t value)
{
    do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
}
4141

    
4142
/* WR %softint: replace SOFTINT wholesale. */
void helper_write_softint(uint64_t value)
{
    do_modify_softint("helper_write_softint", (uint32_t)value);
}
4146
#endif
4147

    
4148
#ifdef TARGET_SPARC64
4149
/* Return the trap-state entry for the current trap level. */
trap_state* cpu_tsptr(CPUState* env)
{
    return &env->ts[env->tl & MAXTL_MASK];
}
4153
#endif
4154

    
4155
#if !defined(CONFIG_USER_ONLY)
4156

    
4157
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
4158
                                void *retaddr);
4159

    
4160
#define MMUSUFFIX _mmu
4161
#define ALIGNED_ONLY
4162

    
4163
#define SHIFT 0
4164
#include "softmmu_template.h"
4165

    
4166
#define SHIFT 1
4167
#include "softmmu_template.h"
4168

    
4169
#define SHIFT 2
4170
#include "softmmu_template.h"
4171

    
4172
#define SHIFT 3
4173
#include "softmmu_template.h"
4174

    
4175
/* XXX: make it generic ? */
/* If retaddr points into translated code, resynchronize the guest CPU
   state from the translation block containing it; a NULL retaddr
   (call from plain C code) does nothing. */
static void cpu_restore_state2(void *retaddr)
{
    TranslationBlock *tb;
    unsigned long pc;

    if (retaddr) {
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
        if (tb) {
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc);
        }
    }
}
4192

    
4193
/* Unaligned-access hook used by the softmmu templates (ALIGNED_ONLY):
   resync guest state from the host return address, then raise the
   mem_address_not_aligned trap. */
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}
4203

    
4204
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    /* On success this installs a TLB entry; nonzero means a fault. */
    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        /* Resync guest state and longjmp back to the CPU loop so the
           exception set by the fault handler is delivered.  Note env
           is not restored here — cpu_loop_exit() does not return. */
        cpu_restore_state2(retaddr);
        cpu_loop_exit(env);
    }
    env = saved_env;
}
4225

    
4226
#endif /* !CONFIG_USER_ONLY */
4227

    
4228
#ifndef TARGET_SPARC64
4229
#if !defined(CONFIG_USER_ONLY)
4230
/* sparc32 handler for accesses to unassigned physical addresses.
   Records the fault in the MMU fault status (mmuregs[3]) and fault
   address (mmuregs[4]) registers, then raises a code/data access
   error unless the MMU is disabled or running in no-fault mode. */
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;
    int fault_type;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
    /* Don't overwrite translation and access faults */
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
    if ((fault_type > 4) || (fault_type == 0)) {
        env->mmuregs[3] = 0; /* Fault status register */
        if (is_asi)
            env->mmuregs[3] |= 1 << 16;
        if (env->psrs)
            env->mmuregs[3] |= 1 << 5;
        if (is_exec)
            env->mmuregs[3] |= 1 << 6;
        if (is_write)
            env->mmuregs[3] |= 1 << 7;
        /* Fault type 5 plus the FAV bit — presumably per the SPARC
           reference MMU FSR layout; confirm against the spec. */
        env->mmuregs[3] |= (5 << 2) | 2;
        /* SuperSPARC will never place instruction fault addresses in the FAR */
        if (!is_exec) {
            env->mmuregs[4] = addr; /* Fault address register */
        }
    }
    /* overflow (same type fault was not read before another fault) */
    if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
        env->mmuregs[3] |= 1;
    }

    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }

    /* flush neverland mappings created during no-fault mode,
       so the sequential MMU faults report proper fault types */
    if (env->mmuregs[0] & MMU_NF) {
        tlb_flush(env, 1);
    }

    env = saved_env;
}
4290
#endif
4291
#else
4292
/* sparc64 handler for accesses to unassigned addresses: unlike the
   sparc32 variant there is no fault-register bookkeeping here — the
   appropriate access exception is raised directly. */
#if defined(CONFIG_USER_ONLY)
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                          int is_asi, int size)
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
#endif
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
#endif

    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);

    env = saved_env;
}
4319
#endif
4320

    
4321

    
4322
#ifdef TARGET_SPARC64
4323
/* Write the tick counter; a no-op in user-only mode, where no timer
   device exists. */
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}
4329

    
4330
/* Read the tick counter; always 0 in user-only mode. */
uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}
4338

    
4339
/* Write the tick compare/limit register; a no-op in user-only mode. */
void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
4345
#endif