Statistics
| Branch: | Revision:

root / target-sparc / op_helper.c @ b5176d27

History | View | Annotate | Download (118.4 kB)

1
#include "exec.h"
2
#include "host-utils.h"
3
#include "helper.h"
4
#include "sysemu.h"
5

    
6
//#define DEBUG_MMU
7
//#define DEBUG_MXCC
8
//#define DEBUG_UNALIGNED
9
//#define DEBUG_UNASSIGNED
10
//#define DEBUG_ASI
11
//#define DEBUG_PCALL
12
//#define DEBUG_PSTATE
13
//#define DEBUG_CACHE_CONTROL
14

    
15
#ifdef DEBUG_MMU
16
#define DPRINTF_MMU(fmt, ...)                                   \
17
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
18
#else
19
#define DPRINTF_MMU(fmt, ...) do {} while (0)
20
#endif
21

    
22
#ifdef DEBUG_MXCC
23
#define DPRINTF_MXCC(fmt, ...)                                  \
24
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
25
#else
26
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
27
#endif
28

    
29
#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...)                                   \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#else
/* Empty fallback so DPRINTF_ASI can be used without an #ifdef guard,
   matching the other DPRINTF_* debug macros in this file. */
#define DPRINTF_ASI(fmt, ...) do {} while (0)
#endif
33

    
34
#ifdef DEBUG_PSTATE
35
#define DPRINTF_PSTATE(fmt, ...)                                   \
36
    do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
37
#else
38
#define DPRINTF_PSTATE(fmt, ...) do {} while (0)
39
#endif
40

    
41
#ifdef DEBUG_CACHE_CONTROL
42
#define DPRINTF_CACHE_CONTROL(fmt, ...)                                   \
43
    do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
44
#else
45
#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
46
#endif
47

    
48
#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
/* Address masking is in effect when PSTATE.AM is set. */
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
/* 32-bit ABI: addresses are always masked to 32 bits. */
#define AM_CHECK(env1) (1)
#endif
#endif

/* Shorthand for the fixed double/quad FP operand slots in the CPU state. */
#define DT0 (env->dt0)
#define DT1 (env->dt1)
#define QT0 (env->qt0)
#define QT1 (env->qt1)
60

    
61
/* Leon3 cache control */
62

    
63
/* Cache control: emulate the behavior of the cache control registers but
   without any effect on the emulated CPU or memory state */
65

    
66
#define CACHE_STATE_MASK 0x3
67
#define CACHE_DISABLED   0x0
68
#define CACHE_FROZEN     0x1
69
#define CACHE_ENABLED    0x3
70

    
71
/* Cache Control register fields */
72

    
73
#define CACHE_CTRL_IF (1 <<  4)  /* Instruction Cache Freeze on Interrupt */
74
#define CACHE_CTRL_DF (1 <<  5)  /* Data Cache Freeze on Interrupt */
75
#define CACHE_CTRL_DP (1 << 14)  /* Data cache flush pending */
76
#define CACHE_CTRL_IP (1 << 15)  /* Instruction cache flush pending */
77
#define CACHE_CTRL_IB (1 << 16)  /* Instruction burst fetch */
78
#define CACHE_CTRL_FI (1 << 21)  /* Flush Instruction cache (Write only) */
79
#define CACHE_CTRL_FD (1 << 22)  /* Flush Data cache (Write only) */
80
#define CACHE_CTRL_DS (1 << 23)  /* Data cache snoop enable */
81

    
82
#if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
83
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
84
                          int is_asi, int size);
85
#endif
86

    
87
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
88
// Calculates TSB pointer value for fault page size 8k or 64k
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    const uint64_t tsb_base = tsb_register & ~0x1fffULL;
    const int split_enabled = (tsb_register >> 12) & 1;
    const int size_field = tsb_register & 0xf;
    uint64_t base_mask = ~0x1fffULL;

    // VA portion of the tag access register (low 13 bits hold the context)
    uint64_t va = tag_access_register & ~0x1fffULL;

    // position the VA bits according to the fault page size
    switch (page_size) {
    case 8 * 1024:
        va >>= 9;
        break;
    case 64 * 1024:
        va >>= 12;
        break;
    default:
        break;
    }

    // widen the base mask for larger TSBs
    if (size_field != 0) {
        base_mask <<= size_field;
    }

    // with a split TSB, 8k and 64k halves are selected by one extra bit
    if (split_enabled) {
        if (page_size == 8 * 1024) {
            va &= ~(1ULL << (13 + size_field));
        } else if (page_size == 64 * 1024) {
            va |= 1ULL << (13 + size_field);
        }
        base_mask <<= 1;
    }

    // combine base and VA-derived index; pointers are 16-byte aligned
    return ((tsb_base & base_mask) | (va & ~base_mask)) & ~0xfULL;
}
127

    
128
// Calculates tag target register value by reordering bits
// in tag access register
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    uint64_t context = tag_access_register & 0x1fff;  // low 13 bits
    uint64_t va_bits = tag_access_register >> 22;     // upper VA bits

    return (context << 48) | va_bits;
}
134

    
135
/* Overwrite one TLB entry with (tlb_tag, tlb_tte).  If the old entry was
   valid, flush every QEMU softmmu page it covered first so no stale
   translation survives the replacement. */
static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUState *env1)
{
    target_ulong mask, size, va, offset;

    // flush page range if translation is valid
    if (TTE_IS_VALID(tlb->tte)) {

        // TTE bits 61-62 encode the page size; each step multiplies the
        // page size by 8, hence the shift by 3 per step.
        mask = 0xffffffffffffe000ULL;
        mask <<= 3 * ((tlb->tte >> 61) & 3);
        size = ~mask + 1;

        va = tlb->tag & mask;

        // flush each target page inside the mapped range
        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
            tlb_flush_page(env1, va + offset);
        }
    }

    tlb->tag = tlb_tag;
    tlb->tte = tlb_tte;
}
158

    
159
/* Invalidate TLB entries selected by a demap operation.  demap_addr encodes
   the operation: bit 6 chooses demap-by-context vs demap-by-page, and bits
   4-5 select which context register to compare against (primary, secondary
   or nucleus).  Matching entries are cleared via replace_tlb_entry(). */
static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char* strmmu, CPUState *env1)
{
    unsigned int i;
    target_ulong mask;
    uint64_t context;

    int is_demap_context = (demap_addr >> 6) & 1;

    // demap context
    switch ((demap_addr >> 4) & 3) {
    case 0: // primary
        context = env1->dmmu.mmu_primary_context;
        break;
    case 1: // secondary
        context = env1->dmmu.mmu_secondary_context;
        break;
    case 2: // nucleus
        context = 0;
        break;
    case 3: // reserved
    default:
        return;
    }

    for (i = 0; i < 64; i++) {
        if (TTE_IS_VALID(tlb[i].tte)) {

            if (is_demap_context) {
                // will remove non-global entries matching context value
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            } else {
                // demap page
                // will remove any entry matching VA
                // (mask widens with the entry's page size, TTE bits 61-62)
                mask = 0xffffffffffffe000ULL;
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);

                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
                    continue;
                }

                // entry should be global or matching context value
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            }

            replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
        }
    }
}
218

    
219
/* Insert (tlb_tag, tlb_tte) into the 64-entry TLB using a one-bit LRU
   policy: prefer an invalid slot; otherwise evict an unlocked, unused
   entry, clearing all used bits and retrying once if every unlocked
   entry is marked used.  If all entries are locked, the insert is
   silently dropped. */
static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char* strmmu, CPUState *env1)
{
    unsigned int i, replace_used;

    // Try replacing invalid entry
    for (i = 0; i < 64; i++) {
        if (!TTE_IS_VALID(tlb[i].tte)) {
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
            return;
        }
    }

    // All entries are valid, try replacing unlocked entry

    for (replace_used = 0; replace_used < 2; ++replace_used) {

        // Used entries are not replaced on first pass

        for (i = 0; i < 64; i++) {
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {

                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
                            strmmu, (replace_used?"used":"unused"), i);
                dump_mmu(stdout, fprintf, env1);
#endif
                return;
            }
        }

        // Now reset used bit and search for unused entries again

        for (i = 0; i < 64; i++) {
            TTE_SET_UNUSED(tlb[i].tte);
        }
    }

#ifdef DEBUG_MMU
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
#endif
    // error state?
}
268

    
269
#endif
270

    
271
/* Apply SPARC64 32-bit address masking (PSTATE.AM) when active; on
   sparc32 the address is returned unchanged. */
static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(env1)) {
        addr &= 0xffffffffULL;
    }
#endif
    return addr;
}
279

    
280
/* returns true if access using this ASI is to have address translated by MMU
   otherwise access is to raw physical address */
static inline int is_translating_asi(int asi)
{
#ifdef TARGET_SPARC64
    /* Ultrasparc IIi translating asi
       - note this list is defined by cpu implementation
     */
    return (asi >= 0x04 && asi <= 0x11) ||
           (asi >= 0x16 && asi <= 0x19) ||
           (asi >= 0x1E && asi <= 0x1F) ||
           (asi >= 0x24 && asi <= 0x2C) ||
           (asi >= 0x70 && asi <= 0x73) ||
           (asi >= 0x78 && asi <= 0x79) ||
           (asi >= 0x80 && asi <= 0xFF) ? 1 : 0;
#else
    /* TODO: check sparc32 bits */
    return 0;
#endif
}
306

    
307
/* Mask the address when the ASI is one that goes through the MMU;
   raw-physical ASIs are passed through untouched. */
static inline target_ulong asi_address_mask(CPUState *env1,
                                            int asi, target_ulong addr)
{
    if (is_translating_asi(asi)) {
        /* Fix: mask against the env1 parameter, not the global env —
           the parameter was previously ignored. */
        return address_mask(env1, addr);
    } else {
        return addr;
    }
}
316

    
317
/* Record the trap type in the CPU state and longjmp back to the main
   execution loop; does not return. */
static void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit(env);
}

/* TCG-visible wrapper around raise_exception(). */
void HELPER(raise_exception)(int tt)
{
    raise_exception(tt);
}
327

    
328
/* Request a system shutdown (power-off); a no-op in user-mode emulation. */
void helper_shutdown(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_system_shutdown_request();
#endif
}
334

    
335
/* Raise an alignment trap when any of the bits in 'align' are set in the
   address (align is the required-alignment mask, e.g. 3 for 4-byte). */
void helper_check_align(target_ulong addr, uint32_t align)
{
    if ((addr & align) == 0) {
        return;
    }
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    raise_exception(TT_UNALIGNED);
}
345

    
346
/* Declare a no-argument FP helper: double/quad operands travel through the
   fixed env slots (DT0/DT1, QT0/QT1) rather than as parameters. */
#define F_HELPER(name, p) void helper_f##name##p(void)

/* Generate the single/double/quad variants of one FP binary operation.
   Singles are passed and returned by value; double/quad read DT0,DT1 /
   QT0,QT1 and leave the result in DT0 / QT0. */
#define F_BINOP(name)                                           \
    float32 helper_f ## name ## s (float32 src1, float32 src2)  \
    {                                                           \
        return float32_ ## name (src1, src2, &env->fp_status);  \
    }                                                           \
    F_HELPER(name, d)                                           \
    {                                                           \
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);     \
    }                                                           \
    F_HELPER(name, q)                                           \
    {                                                           \
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);    \
    }

F_BINOP(add);
F_BINOP(sub);
F_BINOP(mul);
F_BINOP(div);
#undef F_BINOP
367

    
368
/* fsmuld: widen two singles to double and multiply; result in DT0. */
void helper_fsmuld(float32 src1, float32 src2)
{
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
                      float32_to_float64(src2, &env->fp_status),
                      &env->fp_status);
}

/* fdmulq: widen DT0 and DT1 to quad and multiply; result in QT0. */
void helper_fdmulq(void)
{
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
                       float64_to_float128(DT1, &env->fp_status),
                       &env->fp_status);
}
381

    
382
/* Negation helpers: pure sign-bit flips, no fp_status involved. */
float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
F_HELPER(neg, d)
{
    DT0 = float64_chs(DT1);
}

F_HELPER(neg, q)
{
    QT0 = float128_chs(QT1);
}
#endif
398

    
399
/* Integer to float conversion.  */
float32 helper_fitos(int32_t src)
{
    return int32_to_float32(src, &env->fp_status);
}

void helper_fitod(int32_t src)
{
    /* Double result delivered in DT0. */
    DT0 = int32_to_float64(src, &env->fp_status);
}

void helper_fitoq(int32_t src)
{
    /* Quad result delivered in QT0. */
    QT0 = int32_to_float128(src, &env->fp_status);
}

#ifdef TARGET_SPARC64
/* 64-bit sources are taken from DT1, reinterpreted as a raw int64
   bit pattern. */
float32 helper_fxtos(void)
{
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, d)
{
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, q)
{
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
#undef F_HELPER
432

    
433
/* floating point conversion: double/quad operands travel via DT1/QT1,
   double/quad results via DT0/QT0; singles by value. */
float32 helper_fdtos(void)
{
    return float64_to_float32(DT1, &env->fp_status);
}

void helper_fstod(float32 src)
{
    DT0 = float32_to_float64(src, &env->fp_status);
}

float32 helper_fqtos(void)
{
    return float128_to_float32(QT1, &env->fp_status);
}

void helper_fstoq(float32 src)
{
    QT0 = float32_to_float128(src, &env->fp_status);
}

void helper_fqtod(void)
{
    DT0 = float128_to_float64(QT1, &env->fp_status);
}

void helper_fdtoq(void)
{
    QT0 = float64_to_float128(DT1, &env->fp_status);
}
463

    
464
/* Float to integer conversion.  All variants truncate toward zero, as
   the *_round_to_zero softfloat calls make explicit. */
int32_t helper_fstoi(float32 src)
{
    return float32_to_int32_round_to_zero(src, &env->fp_status);
}

int32_t helper_fdtoi(void)
{
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
}

int32_t helper_fqtoi(void)
{
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
}
479

    
480
#ifdef TARGET_SPARC64
481
/* Float to 64-bit integer conversion (truncating); the raw int64 result
   is stored into the DT0 slot through a bit-pattern reinterpretation. */
void helper_fstox(float32 src)
{
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
}

void helper_fdtox(void)
{
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}

void helper_fqtox(void)
{
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}
495

    
496
/* VIS faligndata: treat DT0:DT1 as a 16-byte value and extract the
   8 bytes starting at the byte offset held in the low 3 bits of GSR. */
void helper_faligndata(void)
{
    uint64_t tmp;

    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
    /* on many architectures a shift of 64 does nothing */
    if ((env->gsr & 7) != 0) {
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
    }
    *((uint64_t *)&DT0) = tmp;
}
507

    
508
/* Lane accessors for the vis64/vis32 unions below: index VIS elements in
   target (big-endian) order regardless of host endianness. */
#ifdef HOST_WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif
523

    
524
/* 64-bit VIS operand viewed as bytes, (signed) 16-bit words, 32-bit
   longwords, one 64-bit value, or a float64 bit pattern. */
typedef union {
    uint8_t b[8];
    uint16_t w[4];
    int16_t sw[4];
    uint32_t l[2];
    uint64_t ll;
    float64 d;
} vis64;

/* 32-bit VIS operand viewed as bytes, 16-bit words, one 32-bit value,
   or a float32 bit pattern. */
typedef union {
    uint8_t b[4];
    uint16_t w[2];
    uint32_t l;
    float32 f;
} vis32;
539

    
540
/* VIS fpmerge: interleave the low four bytes of DT0 (s) with the low
   four bytes of DT1 (d) into eight bytes; result in DT0. */
void helper_fpmerge(void)
{
    vis64 s, d;

    s.d = DT0;
    d.d = DT1;

    // Reverse calculation order to handle overlap
    // (d is both source and destination, so write high lanes first)
    d.VIS_B64(7) = s.VIS_B64(3);
    d.VIS_B64(6) = d.VIS_B64(3);
    d.VIS_B64(5) = s.VIS_B64(2);
    d.VIS_B64(4) = d.VIS_B64(2);
    d.VIS_B64(3) = s.VIS_B64(1);
    d.VIS_B64(2) = d.VIS_B64(1);
    d.VIS_B64(1) = s.VIS_B64(0);
    //d.VIS_B64(0) = d.VIS_B64(0);

    DT0 = d.d;
}
559

    
560
/* VIS fmul8x16: multiply each signed 16-bit element of DT1 by the matching
   unsigned byte of DT0 (treated as an 8.8 fixed-point fraction), rounding
   the product back to 16 bits.  Result in DT0. */
void helper_fmul8x16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

/* Round to nearest: bump when the discarded low byte is > 0x7f. */
#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* fmul8x16al: as fmul8x16, but every byte of DT0 is multiplied by the
   single 16-bit element in lane 1 of DT1. */
void helper_fmul8x16al(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* fmul8x16au: as fmul8x16al, but the fixed multiplier is lane 0 of DT1. */
void helper_fmul8x16au(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}
628

    
629
/* VIS fmul8sux16: multiply each signed 16-bit element of DT1 by the signed
   upper byte (>> 8) of the matching 16-bit element of DT0, rounding the
   product back to 16 bits.  Result in DT0. */
void helper_fmul8sux16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* fmul8ulx16: as above but the multiplier is the unsigned low byte
   (even byte lane) of each 16-bit element of DT0. */
void helper_fmul8ulx16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_W64(r) = tmp;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* fmuld8sux16: like fmul8sux16 but only two products, each widened to a
   full 32-bit result lane. */
void helper_fmuld8sux16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_L64(r) = tmp;

    // Reverse calculation order to handle overlap
    PMUL(1);
    PMUL(0);
#undef PMUL

    DT0 = d.d;
}

/* fmuld8ulx16: like fmul8ulx16 but only two products, each widened to a
   full 32-bit result lane. */
void helper_fmuld8ulx16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_L64(r) = tmp;

    // Reverse calculation order to handle overlap
    PMUL(1);
    PMUL(0);
#undef PMUL

    DT0 = d.d;
}
718

    
719
/* VIS fexpand: take the low four bytes of DT0 and expand each into a
   16-bit lane shifted left by 4 (4.4 fixed point).  Result in DT0. */
void helper_fexpand(void)
{
    vis32 s;
    vis64 d;

    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
    d.d = DT1;
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
    d.VIS_W64(3) = s.VIS_B32(3) << 4;

    DT0 = d.d;
}
733

    
734
/* Generate the four partitioned-arithmetic variants of one VIS op:
   name16 / name32 operate on DT0,DT1 with the result in DT0;
   name16s / name32s take and return 32-bit values directly. */
#define VIS_HELPER(name, F)                             \
    void name##16(void)                                 \
    {                                                   \
        vis64 s, d;                                     \
                                                        \
        s.d = DT0;                                      \
        d.d = DT1;                                      \
                                                        \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));   \
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));   \
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));   \
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));   \
                                                        \
        DT0 = d.d;                                      \
    }                                                   \
                                                        \
    uint32_t name##16s(uint32_t src1, uint32_t src2)    \
    {                                                   \
        vis32 s, d;                                     \
                                                        \
        s.l = src1;                                     \
        d.l = src2;                                     \
                                                        \
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));   \
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));   \
                                                        \
        return d.l;                                     \
    }                                                   \
                                                        \
    void name##32(void)                                 \
    {                                                   \
        vis64 s, d;                                     \
                                                        \
        s.d = DT0;                                      \
        d.d = DT1;                                      \
                                                        \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));   \
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));   \
                                                        \
        DT0 = d.d;                                      \
    }                                                   \
                                                        \
    uint32_t name##32s(uint32_t src1, uint32_t src2)    \
    {                                                   \
        vis32 s, d;                                     \
                                                        \
        s.l = src1;                                     \
        d.l = src2;                                     \
                                                        \
        d.l = F(d.l, s.l);                              \
                                                        \
        return d.l;                                     \
    }

/* Modular (wrapping) lane add/sub. */
#define FADD(a, b) ((a) + (b))
#define FSUB(a, b) ((a) - (b))
VIS_HELPER(helper_fpadd, FADD)
VIS_HELPER(helper_fpsub, FSUB)
792

    
793
/* Generate partitioned-compare helpers: each lane comparison contributes
   one bit to a small result mask, returned as a 64-bit value (upper lanes
   zeroed). */
#define VIS_CMPHELPER(name, F)                                        \
    uint64_t name##16(void)                                       \
    {                                                             \
        vis64 s, d;                                               \
                                                                  \
        s.d = DT0;                                                \
        d.d = DT1;                                                \
                                                                  \
        d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 1 : 0;     \
        d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0;    \
        d.VIS_W64(0) |= F(s.VIS_W64(2), d.VIS_W64(2)) ? 4 : 0;    \
        d.VIS_W64(0) |= F(s.VIS_W64(3), d.VIS_W64(3)) ? 8 : 0;    \
        d.VIS_W64(1) = d.VIS_W64(2) = d.VIS_W64(3) = 0;           \
                                                                  \
        return d.ll;                                              \
    }                                                             \
                                                                  \
    uint64_t name##32(void)                                       \
    {                                                             \
        vis64 s, d;                                               \
                                                                  \
        s.d = DT0;                                                \
        d.d = DT1;                                                \
                                                                  \
        d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0;     \
        d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0;    \
        d.VIS_L64(1) = 0;                                         \
                                                                  \
        return d.ll;                                              \
    }

#define FCMPGT(a, b) ((a) > (b))
#define FCMPEQ(a, b) ((a) == (b))
#define FCMPLE(a, b) ((a) <= (b))
#define FCMPNE(a, b) ((a) != (b))

VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
VIS_CMPHELPER(helper_fcmple, FCMPLE)
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
833
#endif
834

    
835
/* Fold the pending softfloat exception flags into the FSR: set the CEXC
   bits, then either trap (if the corresponding trap-enable bit is set)
   or accumulate them into the AEXC field. */
void helper_check_ieee_exceptions(void)
{
    target_ulong status;

    status = get_float_exception_flags(&env->fp_status);
    if (status) {
        /* Copy IEEE 754 flags into FSR */
        if (status & float_flag_invalid)
            env->fsr |= FSR_NVC;
        if (status & float_flag_overflow)
            env->fsr |= FSR_OFC;
        if (status & float_flag_underflow)
            env->fsr |= FSR_UFC;
        if (status & float_flag_divbyzero)
            env->fsr |= FSR_DZC;
        if (status & float_flag_inexact)
            env->fsr |= FSR_NXC;

        /* ">> 23" aligns the TEM trap-enable bits with the CEXC bits so
           they can be ANDed directly. */
        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
            /* Unmasked exception, generate a trap */
            env->fsr |= FSR_FTT_IEEE_EXCP;
            raise_exception(TT_FP_EXCP);
        } else {
            /* Accumulate exceptions: CEXC << 5 lands in the AEXC field */
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
        }
    }
}
863

    
864
/* Clear the accumulated softfloat exception flags. */
void helper_clear_float_exceptions(void)
{
    set_float_exception_flags(0, &env->fp_status);
}
868

    
869
/* Absolute value: clears the sign bit only, no fp_status involved. */
float32 helper_fabss(float32 src)
{
    return float32_abs(src);
}
873

    
874
#ifdef TARGET_SPARC64
875
void helper_fabsd(void)
876
{
877
    DT0 = float64_abs(DT1);
878
}
879

    
880
void helper_fabsq(void)
881
{
882
    QT0 = float128_abs(QT1);
883
}
884
#endif
885

    
886
/* Square-root helpers; double/quad use the DT/QT operand slots. */
float32 helper_fsqrts(float32 src)
{
    return float32_sqrt(src, &env->fp_status);
}

void helper_fsqrtd(void)
{
    DT0 = float64_sqrt(DT1, &env->fp_status);
}

void helper_fsqrtq(void)
{
    QT0 = float128_sqrt(QT1, &env->fp_status);
}
900

    
901
/* Generate an FP compare helper for double/quad operands held in the env
   slots (reg1/reg2).  FS selects the fcc field of the FSR that receives
   the result; when E is set, any NaN operand traps immediately if the
   invalid-op trap enable (FSR_NVM) is on.  An unordered comparison with
   NVM set also traps; otherwise it sets both FCC bits plus NVA. */
#define GEN_FCMP(name, size, reg1, reg2, FS, E)                         \
    void glue(helper_, name) (void)                                     \
    {                                                                   \
        env->fsr &= FSR_FTT_NMASK;                                      \
        if (E && (glue(size, _is_any_nan)(reg1) ||                      \
                     glue(size, _is_any_nan)(reg2)) &&                  \
            (env->fsr & FSR_NVM)) {                                     \
            env->fsr |= FSR_NVC;                                        \
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
            raise_exception(TT_FP_EXCP);                                \
        }                                                               \
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) {   \
        case float_relation_unordered:                                  \
            if ((env->fsr & FSR_NVM)) {                                 \
                env->fsr |= FSR_NVC;                                    \
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
                raise_exception(TT_FP_EXCP);                            \
            } else {                                                    \
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
                env->fsr |= FSR_NVA;                                    \
            }                                                           \
            break;                                                      \
        case float_relation_less:                                       \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC0 << FS;                                 \
            break;                                                      \
        case float_relation_greater:                                    \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC1 << FS;                                 \
            break;                                                      \
        default:                                                        \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            break;                                                      \
        }                                                               \
    }
937
/* GEN_FCMPS(name, size, FS, E): define helper_<name>(src1, src2), a
   single-precision FP compare that deposits its result into the fcc
   field located FS bits up in env->fsr.  If E is nonzero (the fcmpe*
   flavours), an operand pair containing a NaN raises TT_FP_EXCP
   before the compare when the NV trap mask (FSR_NVM) is set.  An
   unordered result otherwise sets both fcc bits plus FSR_NVA; less /
   greater / equal set FCC0 / FCC1 / neither. */
#define GEN_FCMPS(name, size, FS, E)                                    \
    void glue(helper_, name)(float32 src1, float32 src2)                \
    {                                                                   \
        env->fsr &= FSR_FTT_NMASK;                                      \
        if (E && (glue(size, _is_any_nan)(src1) ||                      \
                     glue(size, _is_any_nan)(src2)) &&                  \
            (env->fsr & FSR_NVM)) {                                     \
            env->fsr |= FSR_NVC;                                        \
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
            raise_exception(TT_FP_EXCP);                                \
        }                                                               \
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) {   \
        case float_relation_unordered:                                  \
            if ((env->fsr & FSR_NVM)) {                                 \
                env->fsr |= FSR_NVC;                                    \
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
                raise_exception(TT_FP_EXCP);                            \
            } else {                                                    \
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
                env->fsr |= FSR_NVA;                                    \
            }                                                           \
            break;                                                      \
        case float_relation_less:                                       \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC0 << FS;                                 \
            break;                                                      \
        case float_relation_greater:                                    \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC1 << FS;                                 \
            break;                                                      \
        default:                                                        \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            break;                                                      \
        }                                                               \
    }
973

    
974
/* Instantiate the fcc0 (FS = 0) compare helpers; the *e* variants
   (E = 1) signal on unordered operands as described in GEN_FCMP(S). */
GEN_FCMPS(fcmps, float32, 0, 0);
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);

GEN_FCMPS(fcmpes, float32, 0, 1);
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);

GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
982

    
983
static uint32_t compute_all_flags(void)
984
{
985
    return env->psr & PSR_ICC;
986
}
987

    
988
static uint32_t compute_C_flags(void)
989
{
990
    return env->psr & PSR_CARRY;
991
}
992

    
993
static inline uint32_t get_NZ_icc(int32_t dst)
994
{
995
    uint32_t ret = 0;
996

    
997
    if (dst == 0) {
998
        ret = PSR_ZERO;
999
    } else if (dst < 0) {
1000
        ret = PSR_NEG;
1001
    }
1002
    return ret;
1003
}
1004

    
1005
#ifdef TARGET_SPARC64
/* xcc counterparts of the helpers above, reading env->xcc instead of
   env->psr and classifying a 64-bit result. */
static uint32_t compute_all_flags_xcc(void)
{
    return PSR_ICC & env->xcc;
}

static uint32_t compute_C_flags_xcc(void)
{
    return PSR_CARRY & env->xcc;
}

/* Derive the N and Z xcc bits from a 64-bit result. */
static inline uint32_t get_NZ_xcc(target_long dst)
{
    if (dst == 0) {
        return PSR_ZERO;
    }
    return (dst < 0) ? PSR_NEG : 0;
}
#endif
1028

    
1029
static inline uint32_t get_V_div_icc(target_ulong src2)
1030
{
1031
    uint32_t ret = 0;
1032

    
1033
    if (src2 != 0) {
1034
        ret = PSR_OVF;
1035
    }
1036
    return ret;
1037
}
1038

    
1039
static uint32_t compute_all_div(void)
1040
{
1041
    uint32_t ret;
1042

    
1043
    ret = get_NZ_icc(CC_DST);
1044
    ret |= get_V_div_icc(CC_SRC2);
1045
    return ret;
1046
}
1047

    
1048
static uint32_t compute_C_div(void)
1049
{
1050
    return 0;
1051
}
1052

    
1053
static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
1054
{
1055
    uint32_t ret = 0;
1056

    
1057
    if (dst < src1) {
1058
        ret = PSR_CARRY;
1059
    }
1060
    return ret;
1061
}
1062

    
1063
static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
1064
                                      uint32_t src2)
1065
{
1066
    uint32_t ret = 0;
1067

    
1068
    if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
1069
        ret = PSR_CARRY;
1070
    }
1071
    return ret;
1072
}
1073

    
1074
static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
1075
                                     uint32_t src2)
1076
{
1077
    uint32_t ret = 0;
1078

    
1079
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
1080
        ret = PSR_OVF;
1081
    }
1082
    return ret;
1083
}
1084

    
1085
#ifdef TARGET_SPARC64
/* 64-bit (xcc) variants of the add carry/overflow predicates. */
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
{
    return (dst < src1) ? PSR_CARRY : 0;
}

static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    /* Carry-out of bit 63; valid with a carry-in. */
    target_ulong c_out = (src1 & src2) | (~dst & (src1 | src2));

    return (c_out & (1ULL << 63)) ? PSR_CARRY : 0;
}

static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
                                         target_ulong src2)
{
    /* Signed overflow in bit 63. */
    target_ulong v = (src1 ^ src2 ^ -1) & (src1 ^ dst);

    return (v & (1ULL << 63)) ? PSR_OVF : 0;
}

static uint32_t compute_all_add_xcc(void)
{
    return get_NZ_xcc(CC_DST)
        | get_C_add_xcc(CC_DST, CC_SRC)
        | get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
}

static uint32_t compute_C_add_xcc(void)
{
    return get_C_add_xcc(CC_DST, CC_SRC);
}
#endif
1133

    
1134
static uint32_t compute_all_add(void)
1135
{
1136
    uint32_t ret;
1137

    
1138
    ret = get_NZ_icc(CC_DST);
1139
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1140
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1141
    return ret;
1142
}
1143

    
1144
static uint32_t compute_C_add(void)
1145
{
1146
    return get_C_add_icc(CC_DST, CC_SRC);
1147
}
1148

    
1149
#ifdef TARGET_SPARC64
/* xcc after ADDXcc: carry uses the bit-63 carry-out predicate. */
static uint32_t compute_all_addx_xcc(void)
{
    return get_NZ_xcc(CC_DST)
        | get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2)
        | get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
}

static uint32_t compute_C_addx_xcc(void)
{
    return get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
}
#endif
1168

    
1169
static uint32_t compute_all_addx(void)
1170
{
1171
    uint32_t ret;
1172

    
1173
    ret = get_NZ_icc(CC_DST);
1174
    ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1175
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1176
    return ret;
1177
}
1178

    
1179
static uint32_t compute_C_addx(void)
1180
{
1181
    uint32_t ret;
1182

    
1183
    ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1184
    return ret;
1185
}
1186

    
1187
static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
1188
{
1189
    uint32_t ret = 0;
1190

    
1191
    if ((src1 | src2) & 0x3) {
1192
        ret = PSR_OVF;
1193
    }
1194
    return ret;
1195
}
1196

    
1197
static uint32_t compute_all_tadd(void)
1198
{
1199
    uint32_t ret;
1200

    
1201
    ret = get_NZ_icc(CC_DST);
1202
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1203
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1204
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1205
    return ret;
1206
}
1207

    
1208
static uint32_t compute_all_taddtv(void)
1209
{
1210
    uint32_t ret;
1211

    
1212
    ret = get_NZ_icc(CC_DST);
1213
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1214
    return ret;
1215
}
1216

    
1217
static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
1218
{
1219
    uint32_t ret = 0;
1220

    
1221
    if (src1 < src2) {
1222
        ret = PSR_CARRY;
1223
    }
1224
    return ret;
1225
}
1226

    
1227
static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
1228
                                      uint32_t src2)
1229
{
1230
    uint32_t ret = 0;
1231

    
1232
    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
1233
        ret = PSR_CARRY;
1234
    }
1235
    return ret;
1236
}
1237

    
1238
static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
1239
                                     uint32_t src2)
1240
{
1241
    uint32_t ret = 0;
1242

    
1243
    if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
1244
        ret = PSR_OVF;
1245
    }
1246
    return ret;
1247
}
1248

    
1249

    
1250
#ifdef TARGET_SPARC64
/* 64-bit (xcc) variants of the subtract borrow/overflow predicates. */
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
{
    return (src1 < src2) ? PSR_CARRY : 0;
}

static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    /* Borrow-out of bit 63; valid with a borrow-in. */
    target_ulong b_out = (~src1 & src2) | (dst & (~src1 | src2));

    return (b_out & (1ULL << 63)) ? PSR_CARRY : 0;
}

static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    /* Signed overflow in bit 63. */
    target_ulong v = (src1 ^ src2) & (src1 ^ dst);

    return (v & (1ULL << 63)) ? PSR_OVF : 0;
}

static uint32_t compute_all_sub_xcc(void)
{
    return get_NZ_xcc(CC_DST)
        | get_C_sub_xcc(CC_SRC, CC_SRC2)
        | get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
}

static uint32_t compute_C_sub_xcc(void)
{
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
}
#endif
1298

    
1299
static uint32_t compute_all_sub(void)
1300
{
1301
    uint32_t ret;
1302

    
1303
    ret = get_NZ_icc(CC_DST);
1304
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1305
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1306
    return ret;
1307
}
1308

    
1309
static uint32_t compute_C_sub(void)
1310
{
1311
    return get_C_sub_icc(CC_SRC, CC_SRC2);
1312
}
1313

    
1314
#ifdef TARGET_SPARC64
/* xcc after SUBXcc: borrow uses the bit-63 borrow-out predicate. */
static uint32_t compute_all_subx_xcc(void)
{
    return get_NZ_xcc(CC_DST)
        | get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2)
        | get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
}

static uint32_t compute_C_subx_xcc(void)
{
    return get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
}
#endif
1333

    
1334
static uint32_t compute_all_subx(void)
1335
{
1336
    uint32_t ret;
1337

    
1338
    ret = get_NZ_icc(CC_DST);
1339
    ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1340
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1341
    return ret;
1342
}
1343

    
1344
static uint32_t compute_C_subx(void)
1345
{
1346
    uint32_t ret;
1347

    
1348
    ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1349
    return ret;
1350
}
1351

    
1352
static uint32_t compute_all_tsub(void)
1353
{
1354
    uint32_t ret;
1355

    
1356
    ret = get_NZ_icc(CC_DST);
1357
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1358
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1359
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1360
    return ret;
1361
}
1362

    
1363
static uint32_t compute_all_tsubtv(void)
1364
{
1365
    uint32_t ret;
1366

    
1367
    ret = get_NZ_icc(CC_DST);
1368
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1369
    return ret;
1370
}
1371

    
1372
/* Logical operations only set N and Z; C and V are cleared. */
static uint32_t compute_all_logic(void)
{
    return get_NZ_icc(CC_DST);
}

static uint32_t compute_C_logic(void)
{
    return 0;
}

#ifdef TARGET_SPARC64
static uint32_t compute_all_logic_xcc(void)
{
    return get_NZ_xcc(CC_DST);
}
#endif
1388

    
1389
/* Pair of evaluators for one lazily-computed condition-code state. */
typedef struct CCTable {
    uint32_t (*compute_all)(void); /* return all the flags */
    uint32_t (*compute_c)(void);  /* return the C flag */
} CCTable;
1393

    
1394
/* icc evaluators, indexed by the current CC_OP. */
static const CCTable icc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
};
1408

    
1409
#ifdef TARGET_SPARC64
1410
static const CCTable xcc_table[CC_OP_NB] = {
1411
    /* CC_OP_DYNAMIC should never happen */
1412
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
1413
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
1414
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
1415
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
1416
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
1417
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
1418
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1419
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
1420
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1421
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
1422
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
1423
};
1424
#endif
1425

    
1426
/* Materialize the lazily-evaluated condition codes into env->psr (and
   env->xcc on sparc64), then switch to CC_OP_FLAGS so subsequent
   reads take the precomputed path. */
void helper_compute_psr(void)
{
    uint32_t new_psr;

    new_psr = icc_table[CC_OP].compute_all();
    env->psr = new_psr;
#ifdef TARGET_SPARC64
    new_psr = xcc_table[CC_OP].compute_all();
    env->xcc = new_psr;
#endif
    CC_OP = CC_OP_FLAGS;
}

/* Return the icc carry flag as 0 or 1 (PSR_CARRY shifted down). */
uint32_t helper_compute_C_icc(void)
{
    uint32_t ret;

    ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
    return ret;
}
1446

    
1447
/* Copy one 8-register window image (used for window wraparound). */
static inline void memcpy32(target_ulong *dst, const target_ulong *src)
{
    int i;

    for (i = 0; i < 8; i++) {
        dst[i] = src[i];
    }
}
1458

    
1459
/* Switch the current register window.  The highest window overlaps
   window 0, so its image lives in a scratch slot past the regular
   windows while it is current; copy it back before leaving and out
   again when entering.  new_cwp is assumed already wrapped into
   [0, nwindows). */
static void set_cwp(int new_cwp)
{
    /* put the modified wrap registers at their proper location */
    if (env->cwp == env->nwindows - 1) {
        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
    }
    env->cwp = new_cwp;

    /* put the wrap registers at their temporary location */
    if (new_cwp == env->nwindows - 1) {
        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
    }
    env->regwptr = env->regbase + (new_cwp * 16);
}
1473

    
1474
/* Externally callable wrapper: temporarily installs env1 as the
   global env around set_cwp(). */
void cpu_set_cwp(CPUState *env1, int new_cwp)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    set_cwp(new_cwp);
    env = saved_env;
}
1483

    
1484
/* Assemble the architectural PSR from its scattered env fields.  On
   sparc64 only the icc bits are returned.  Side effect: forces the
   lazy condition codes into env->psr first. */
static target_ulong get_psr(void)
{
    helper_compute_psr();

#if !defined (TARGET_SPARC64)
    return env->version | (env->psr & PSR_ICC) |
        (env->psref? PSR_EF : 0) |
        (env->psrpil << 8) |
        (env->psrs? PSR_S : 0) |
        (env->psrps? PSR_PS : 0) |
        (env->psret? PSR_ET : 0) | env->cwp;
#else
    return env->psr & PSR_ICC;
#endif
}
1499

    
1500
/* Externally callable wrapper: reads the PSR of env1 by swapping it
   in as the global env around get_psr(). */
target_ulong cpu_get_psr(CPUState *env1)
{
    CPUState *saved_env;
    target_ulong ret;

    saved_env = env;
    env = env1;
    ret = get_psr();
    env = saved_env;
    return ret;
}
1511

    
1512
/* Write the PSR, distributing its fields into the env copies.  On
   sparc32 this updates EF/PIL, re-checks pending interrupts in system
   mode (lowering PIL may unmask one), then updates S/PS/ET and the
   window pointer.  Always resets the lazy-flag state to CC_OP_FLAGS
   since the icc bits were just written directly. */
static void put_psr(target_ulong val)
{
    env->psr = val & PSR_ICC;
#if !defined (TARGET_SPARC64)
    env->psref = (val & PSR_EF)? 1 : 0;
    env->psrpil = (val & PSR_PIL) >> 8;
#endif
#if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
    cpu_check_irqs(env);
#endif
#if !defined (TARGET_SPARC64)
    env->psrs = (val & PSR_S)? 1 : 0;
    env->psrps = (val & PSR_PS)? 1 : 0;
    env->psret = (val & PSR_ET)? 1 : 0;
    set_cwp(val & PSR_CWP);
#endif
    env->cc_op = CC_OP_FLAGS;
}
1530

    
1531
/* Externally callable wrapper: writes the PSR of env1 by swapping it
   in as the global env around put_psr(). */
void cpu_put_psr(CPUState *env1, target_ulong val)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    put_psr(val);
    env = saved_env;
}
1540

    
1541
static int cwp_inc(int cwp)
1542
{
1543
    if (unlikely(cwp >= env->nwindows)) {
1544
        cwp -= env->nwindows;
1545
    }
1546
    return cwp;
1547
}
1548

    
1549
/* Externally callable wrapper around cwp_inc(): temporarily installs
   env1 as the global env, returns the wrapped window index. */
int cpu_cwp_inc(CPUState *env1, int cwp)
{
    CPUState *saved_env;
    int ret;    /* was target_ulong; the function returns int */

    saved_env = env;
    env = env1;
    ret = cwp_inc(cwp);
    env = saved_env;
    return ret;
}
1560

    
1561
static int cwp_dec(int cwp)
1562
{
1563
    if (unlikely(cwp < 0)) {
1564
        cwp += env->nwindows;
1565
    }
1566
    return cwp;
1567
}
1568

    
1569
/* Externally callable wrapper around cwp_dec(): temporarily installs
   env1 as the global env, returns the wrapped window index. */
int cpu_cwp_dec(CPUState *env1, int cwp)
{
    CPUState *saved_env;
    int ret;    /* was target_ulong; the function returns int */

    saved_env = env;
    env = env1;
    ret = cwp_dec(cwp);
    env = saved_env;
    return ret;
}
1580

    
1581
#ifdef TARGET_SPARC64
/* sparc64 has three extra fcc fields; instantiate compare helpers for
   fcc1..fcc3 at their FSR bit offsets (22, 24, 26). */
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);

GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);

GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);

GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);

GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);

GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
#endif
#undef GEN_FCMPS
1607

    
1608
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
    defined(DEBUG_MXCC)
/* Debug-only: dump the MXCC stream-data and control registers. */
static void dump_mxcc(CPUState *env)
{
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n"
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
#endif
1626

    
1627
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
/* Debug-only: log one ASI access, masking the value to the access
   size. */
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size)
    {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
#endif
1653

    
1654
#ifndef TARGET_SPARC64
1655
#ifndef CONFIG_USER_ONLY
1656

    
1657

    
1658
/* Leon3 cache control */
1659

    
1660
/* Leon3 interrupt hook: if a cache has its freeze-on-interrupt bit set
   (CACHE_CTRL_IF / CACHE_CTRL_DF) and is currently enabled, move it to
   the frozen state.  I-cache state lives in bits 1:0 of cache_control,
   D-cache state in bits 3:2. */
static void leon3_cache_control_int(void)
{
    uint32_t state = 0;

    if (env->cache_control & CACHE_CTRL_IF) {
        /* Instruction cache state */
        state = env->cache_control & CACHE_STATE_MASK;
        if (state == CACHE_ENABLED) {
            state = CACHE_FROZEN;
            DPRINTF_CACHE_CONTROL("Instruction cache: freeze\n");
        }

        env->cache_control &= ~CACHE_STATE_MASK;
        env->cache_control |= state;
    }

    if (env->cache_control & CACHE_CTRL_DF) {
        /* Data cache state */
        state = (env->cache_control >> 2) & CACHE_STATE_MASK;
        if (state == CACHE_ENABLED) {
            state = CACHE_FROZEN;
            DPRINTF_CACHE_CONTROL("Data cache: freeze\n");
        }

        env->cache_control &= ~(CACHE_STATE_MASK << 2);
        env->cache_control |= (state << 2);
    }
}
1688

    
1689
/* Store to a Leon3 cache-control register.  Only 32-bit accesses are
   honoured; the config registers at 0x04/0x08 are read-only.
   NOTE(review): env->cache_control receives a uint64_t val — relies on
   truncation to the register's width; harmless for the 32-bit stores
   accepted here, but confirm cache_control's declared type. */
static void leon3_cache_control_st(target_ulong addr, uint64_t val, int size)
{
    DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
                          addr, val, size);

    if (size != 4) {
        DPRINTF_CACHE_CONTROL("32bits only\n");
        return;
    }

    switch (addr) {
    case 0x00:              /* Cache control */

        /* These values must always be read as zeros */
        val &= ~CACHE_CTRL_FD;
        val &= ~CACHE_CTRL_FI;
        val &= ~CACHE_CTRL_IB;
        val &= ~CACHE_CTRL_IP;
        val &= ~CACHE_CTRL_DP;

        env->cache_control = val;
        break;
    case 0x04:              /* Instruction cache configuration */
    case 0x08:              /* Data cache configuration */
        /* Read Only */
        break;
    default:
        DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
        break;
    };
}
1720

    
1721
/* Load from a Leon3 cache-control register.  Only 32-bit accesses are
   honoured; config registers return fixed values.  Unknown offsets
   read as 0. */
static uint64_t leon3_cache_control_ld(target_ulong addr, int size)
{
    uint64_t ret = 0;

    if (size != 4) {
        DPRINTF_CACHE_CONTROL("32bits only\n");
        return 0;
    }

    switch (addr) {
    case 0x00:              /* Cache control */
        ret = env->cache_control;
        break;

        /* Configuration registers are read and only always keep those
           predefined values */

    case 0x04:              /* Instruction cache configuration */
        ret = 0x10220000;
        break;
    case 0x08:              /* Data cache configuration */
        ret = 0x18220000;
        break;
    default:
        DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
        break;
    };
    DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
                          addr, ret, size);
    return ret;
}
1752

    
1753
/* Leon3 interrupt entry hook: acknowledge the interrupt, then apply
   the cache freeze-on-interrupt behaviour. */
void leon3_irq_manager(void *irq_manager, int intno)
{
    leon3_irq_ack(irq_manager, intno);
    leon3_cache_control_int();
}
1758

    
1759
/* sparc32 load from an alternate address space.  Dispatches on the
   ASI: MXCC / Leon3 cache control (2), MMU probe (3) and registers
   (4), TurboSPARC diagnostics, user/supervisor code and data spaces,
   physical passthrough (0x20-0x2f), and SuperSPARC breakpoint
   registers.  Unknown ASIs go through do_unassigned_access() and
   return 0.  The result is sign-extended from the access size when
   'sign' is set. */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
    uint32_t last_addr = addr;
#endif

    helper_check_align(addr, size - 1);
    switch (asi) {
    case 2: /* SuperSparc MXCC registers and Leon3 cache control */
        switch (addr) {
        /* NOTE(review): leon3_cache_control_ld() decodes the config
           registers at offsets 0x04/0x08, but the cases accepted here
           are 0x08/0x0C — verify which offsets the guest actually
           uses. */
        case 0x00:          /* Leon3 Cache Control */
        case 0x08:          /* Leon3 Instruction Cache config */
        case 0x0C:          /* Leon3 Data Cache config */
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
                ret = leon3_cache_control_ld(addr, size);
            }
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00c00: /* Module reset register */
            if (size == 8) {
                ret = env->mxccregs[5];
                // should we do something here?
            } else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                ret = env->mxccregs[7];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
                     "addr = %08x -> ret = %" PRIx64 ","
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case 3: /* MMU probe */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            if (mmulev > 4)
                ret = 0;
            else
                ret = mmu_probe(env, addr, mmulev);
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
                        addr, mmulev, ret);
        }
        break;
    case 4: /* read MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;

            ret = env->mmuregs[reg];
            if (reg == 3) /* Fault status cleared on read */
                env->mmuregs[3] = 0;
            else if (reg == 0x13) /* Fault status read */
                ret = env->mmuregs[3];
            else if (reg == 0x14) /* Fault address read */
                ret = env->mmuregs[4];
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
        }
        break;
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
        break;
    case 9: /* Supervisor code access */
        switch(size) {
        case 1:
            ret = ldub_code(addr);
            break;
        case 2:
            ret = lduw_code(addr);
            break;
        default:
        case 4:
            ret = ldl_code(addr);
            break;
        case 8:
            ret = ldq_code(addr);
            break;
        }
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            ret = ldub_user(addr);
            break;
        case 2:
            ret = lduw_user(addr);
            break;
        default:
        case 4:
            ret = ldl_user(addr);
            break;
        case 8:
            ret = ldq_user(addr);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            ret = ldub_kernel(addr);
            break;
        case 2:
            ret = lduw_kernel(addr);
            break;
        default:
        case 4:
            ret = ldl_kernel(addr);
            break;
        case 8:
            ret = ldq_kernel(addr);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
        break;
    case 0x20: /* MMU passthrough */
        switch(size) {
        case 1:
            ret = ldub_phys(addr);
            break;
        case 2:
            ret = lduw_phys(addr);
            break;
        default:
        case 4:
            ret = ldl_phys(addr);
            break;
        case 8:
            ret = ldq_phys(addr);
            break;
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        /* The low nibble of the ASI supplies physical address bits
           35:32. */
        switch(size) {
        case 1:
            ret = ldub_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 2:
            ret = lduw_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        default:
        case 4:
            ret = ldl_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 8:
            ret = ldq_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        }
        break;
    case 0x30: // Turbosparc secondary cache diagnostic
    case 0x31: // Turbosparc RAM snoop
    case 0x32: // Turbosparc page table descriptor diagnostic
    case 0x39: /* data cache diagnostic register */
        ret = 0;
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        {
            int reg = (addr >> 8) & 3;

            switch(reg) {
            case 0: /* Breakpoint Value (Addr) */
                ret = env->mmubpregs[reg];
                break;
            case 1: /* Breakpoint Mask */
                ret = env->mmubpregs[reg];
                break;
            case 2: /* Breakpoint Control */
                ret = env->mmubpregs[reg];
                break;
            case 3: /* Breakpoint Status */
                ret = env->mmubpregs[reg];
                env->mmubpregs[reg] = 0ULL;
                break;
            }
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        ret);
        }
        break;
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
        ret = env->mmubpctrv;
        break;
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
        ret = env->mmubpctrc;
        break;
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
        ret = env->mmubpctrs;
        break;
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
        ret = env->mmubpaction;
        break;
    case 8: /* User code access, XXX */
    default:
        do_unassigned_access(addr, 0, 0, asi, size);
        ret = 0;
        break;
    }
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2010

    
2011
/* Store to an alternate address space (SPARC32 system emulation).
 *
 * addr/val: target virtual address and data to store; asi: address space
 * identifier selecting the target device/space; size: access size in
 * bytes (1, 2, 4 or 8).  Dispatches on the ASI to the SuperSPARC MXCC /
 * Leon3 cache-control registers, the SRMMU control registers, the
 * user/supervisor data spaces, the block copy/fill engines and the
 * physical-memory passthrough windows.  Unhandled ASIs are reported via
 * do_unassigned_access().
 *
 * Fixes versus the previous revision:
 *  - case 0x1f (block fill) aligned the destination with "addr & 7",
 *    which discards the address (it is already 8-byte aligned by the
 *    helper_check_align() above) and always filled kernel bytes 0..31;
 *    it now aligns *down* with "addr & ~7", matching block copy's "~3".
 *  - case 0x38's debug printf logged env->mmuregs instead of the
 *    env->mmubpregs value just written, with a 32-bit format; it now
 *    matches the read side (PRIx64 on mmubpregs).
 */
void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
{
    helper_check_align(addr, size - 1);
    switch(asi) {
    case 2: /* SuperSparc MXCC registers and Leon3 cache control */
        switch (addr) {
        case 0x00:          /* Leon3 Cache Control */
        case 0x08:          /* Leon3 Instruction Cache config */
        case 0x0C:          /* Leon3 Date Cache config */
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
                leon3_cache_control_st(addr, val, size);
            }
            break;

        case 0x01c00000: /* MXCC stream data register 0 */
            if (size == 8)
                env->mxccdata[0] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00008: /* MXCC stream data register 1 */
            if (size == 8)
                env->mxccdata[1] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00010: /* MXCC stream data register 2 */
            if (size == 8)
                env->mxccdata[2] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00018: /* MXCC stream data register 3 */
            if (size == 8)
                env->mxccdata[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00100: /* MXCC stream source */
            if (size == 8)
                env->mxccregs[0] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            /* Writing the stream source register loads the four stream
               data registers from the 32 bytes at the source address. */
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        0);
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        8);
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        16);
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        24);
            break;
        case 0x01c00200: /* MXCC stream destination */
            if (size == 8)
                env->mxccregs[1] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            /* Writing the stream destination register flushes the four
               stream data registers out to the destination address. */
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  0,
                     env->mxccdata[0]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  8,
                     env->mxccdata[1]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
                     env->mxccdata[2]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
                     env->mxccdata[3]);
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                env->mxccregs[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            /* 32-bit write to the low half of the control register. */
            if (size == 4)
                env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
                    | val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00e00: /* MXCC error register  */
            // writing a 1 bit clears the error
            if (size == 8)
                env->mxccregs[6] &= ~val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                env->mxccregs[7] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
                     asi, size, addr, val);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case 3: /* MMU flush */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            DPRINTF_MMU("mmu flush level %d\n", mmulev);
            switch (mmulev) {
            case 0: // flush page
                tlb_flush_page(env, addr & 0xfffff000);
                break;
            case 1: // flush segment (256k)
            case 2: // flush region (16M)
            case 3: // flush context (4G)
            case 4: // flush entire
                /* QEMU keeps no segment/region/context granularity, so
                   anything wider than a page flushes the whole TLB. */
                tlb_flush(env, 1);
                break;
            default:
                break;
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
        }
        break;
    case 4: /* write MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;
            uint32_t oldreg;

            oldreg = env->mmuregs[reg];
            switch(reg) {
            case 0: // Control Register
                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
                                    (val & 0x00ffffff);
                // Mappings generated during no-fault mode or MMU
                // disabled mode are invalid in normal mode
                if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
                    (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
                    tlb_flush(env, 1);
                break;
            case 1: // Context Table Pointer Register
                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
                break;
            case 2: // Context Register
                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
                if (oldreg != env->mmuregs[reg]) {
                    /* we flush when the MMU context changes because
                       QEMU has no MMU context support */
                    tlb_flush(env, 1);
                }
                break;
            case 3: // Synchronous Fault Status Register with Clear
            case 4: // Synchronous Fault Address Register
                /* Read-only through this alias; writable via 0x13/0x14. */
                break;
            case 0x10: // TLB Replacement Control Register
                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
                break;
            case 0x13: // Synchronous Fault Status Register with Read and Clear
                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
                break;
            case 0x14: // Synchronous Fault Address Register
                env->mmuregs[4] = val;
                break;
            default:
                env->mmuregs[reg] = val;
                break;
            }
            if (oldreg != env->mmuregs[reg]) {
                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
                            reg, oldreg, env->mmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
        }
        break;
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            stb_user(addr, val);
            break;
        case 2:
            stw_user(addr, val);
            break;
        default:
        case 4:
            stl_user(addr, val);
            break;
        case 8:
            stq_user(addr, val);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            stb_kernel(addr, val);
            break;
        case 2:
            stw_kernel(addr, val);
            break;
        default:
        case 4:
            stl_kernel(addr, val);
            break;
        case 8:
            stq_kernel(addr, val);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
    case 0x10: /* I/D-cache flush page */
    case 0x11: /* I/D-cache flush segment */
    case 0x12: /* I/D-cache flush region */
    case 0x13: /* I/D-cache flush context */
    case 0x14: /* I/D-cache flush user */
        break;
    case 0x17: /* Block copy, sta access */
        {
            // val = src
            // addr = dst
            // copy 32 bytes
            unsigned int i;
            uint32_t src = val & ~3, dst = addr & ~3, temp;

            for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
                temp = ldl_kernel(src);
                stl_kernel(dst, temp);
            }
        }
        break;
    case 0x1f: /* Block fill, stda access */
        {
            // addr = dst
            // fill 32 bytes with val
            unsigned int i;
            /* Align the destination *down* to 8 bytes.  The previous
               "addr & 7" discarded the address entirely and always
               filled kernel addresses 0..31. */
            uint32_t dst = addr & ~7;

            for (i = 0; i < 32; i += 8, dst += 8)
                stq_kernel(dst, val);
        }
        break;
    case 0x20: /* MMU passthrough */
        {
            switch(size) {
            case 1:
                stb_phys(addr, val);
                break;
            case 2:
                stw_phys(addr, val);
                break;
            case 4:
            default:
                stl_phys(addr, val);
                break;
            case 8:
                stq_phys(addr, val);
                break;
            }
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        {
            /* The low nibble of the ASI supplies physical address
               bits 32..35. */
            switch(size) {
            case 1:
                stb_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 2:
                stw_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 4:
            default:
                stl_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            case 8:
                stq_phys((target_phys_addr_t)addr
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
                break;
            }
        }
        break;
    case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
    case 0x31: // store buffer data, Ross RT620 I-cache flush or
               // Turbosparc snoop RAM
    case 0x32: // store buffer control or Turbosparc page table
               // descriptor diagnostic
    case 0x36: /* I-cache flash clear */
    case 0x37: /* D-cache flash clear */
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
        {
            int reg = (addr >> 8) & 3;

            switch(reg) {
            case 0: /* Breakpoint Value (Addr) */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 1: /* Breakpoint Mask */
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
                break;
            case 2: /* Breakpoint Control */
                env->mmubpregs[reg] = (val & 0x7fULL);
                break;
            case 3: /* Breakpoint Status */
                env->mmubpregs[reg] = (val & 0xfULL);
                break;
            }
            /* Log the value actually stored (mmubpregs, not mmuregs),
               with a 64-bit format matching the read path. */
            DPRINTF_MMU("write breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        env->mmubpregs[reg]);
        }
        break;
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
        env->mmubpctrv = val & 0xffffffff;
        break;
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
        env->mmubpctrc = val & 0x3;
        break;
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
        env->mmubpctrs = val & 0x3;
        break;
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
        env->mmubpaction = val & 0x1fff;
        break;
    case 8: /* User code access, XXX */
    case 9: /* Supervisor code access, XXX */
    default:
        do_unassigned_access(addr, 1, 0, asi, size);
        break;
    }
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
}
2367

    
2368
#endif /* CONFIG_USER_ONLY */
2369
#else /* TARGET_SPARC64 */
2370

    
2371
#ifdef CONFIG_USER_ONLY
2372
/* Load from an alternate address space (SPARC64 user-mode emulation).
 *
 * Only the non-privileged ASIs (>= 0x80) are reachable from user code;
 * anything below traps with a privileged-action exception.  No-fault
 * ASIs silently yield zero for unmapped pages.  The loaded value is
 * byte-swapped for little-endian ASIs and sign-extended when "sign"
 * is set.
 */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    if (asi < 0x80) {
        raise_exception(TT_PRIV_ACT);
    }

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        /* No-fault reads of unreadable pages return zero, no trap. */
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x80: // Primary
    case 0x88: // Primary LE
        switch (size) {
        case 1:
            ret = ldub_raw(addr);
            break;
        case 2:
            ret = lduw_raw(addr);
            break;
        case 4:
            ret = ldl_raw(addr);
            break;
        case 8:
        default:
            ret = ldq_raw(addr);
            break;
        }
        break;
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        break;
    default:
        break;
    }

    /* Little-endian ASIs: swap the bytes of the loaded value. */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        if (size == 2) {
            ret = bswap16(ret);
        } else if (size == 4) {
            ret = bswap32(ret);
        } else if (size == 8) {
            ret = bswap64(ret);
        }
        break;
    default:
        break;
    }

    /* Signed loads: sign-extend from the access width. */
    if (sign) {
        if (size == 1) {
            ret = (int8_t) ret;
        } else if (size == 2) {
            ret = (int16_t) ret;
        } else if (size == 4) {
            ret = (int32_t) ret;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2476

    
2477
/* Store to an alternate address space (SPARC64 user-mode emulation).
 *
 * Restricted ASIs (< 0x80) trap with a privileged-action exception.
 * The value is byte-swapped first for little-endian ASIs, then written
 * to the primary space; secondary space stores are not implemented, and
 * stores to the read-only no-fault ASIs (or anything unknown) are
 * reported via do_unassigned_access().
 */
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if (asi < 0x80) {
        raise_exception(TT_PRIV_ACT);
    }

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    /* Little-endian ASIs: swap the value before storing. */
    if (asi == 0x88 || asi == 0x89) {
        if (size == 2) {
            val = bswap16(val);
        } else if (size == 4) {
            val = bswap32(val);
        } else if (size == 8) {
            val = bswap64(val);
        }
    }

    switch (asi) {
    case 0x80: // Primary
    case 0x88: // Primary LE
        if (size == 1) {
            stb_raw(addr, val);
        } else if (size == 2) {
            stw_raw(addr, val);
        } else if (size == 4) {
            stl_raw(addr, val);
        } else {
            stq_raw(addr, val);
        }
        break;
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        return;

    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}
2544

    
2545
#else /* CONFIG_USER_ONLY */
2546

    
2547
/* Load from an alternate address space (SPARC64 system emulation).
 *
 * Privilege is checked first: restricted ASIs (< 0x80) trap outside
 * privileged mode, and when a hypervisor is present the 0x30-0x7f range
 * additionally requires hypervisor privilege.  The value is then loaded
 * according to the ASI (translating data spaces, physical bypass, MMU
 * registers, TLB diagnostics, ...), byte-swapped for little-endian ASIs
 * and sign-extended when "sign" is set.
 *
 * Fix versus the previous revision: the no-fault MMU-index selection
 * had its ternary arms swapped — secondary ASIs (0x83/0x8b, low bit
 * set, per the comment and the ld*_kernel_secondary dispatch below)
 * were probed with the primary kernel MMU index and vice versa.
 */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    asi &= 0xff;

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        {
            /* secondary space access has lowest asi bit equal to 1 */
            int access_mmu_idx = ( asi & 1 ) ? MMU_KERNEL_SECONDARY_IDX
                                             : MMU_KERNEL_IDX;

            /* No-fault loads of untranslatable addresses return zero. */
            if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
#ifdef DEBUG_ASI
                dump_asi("read ", last_addr, asi, size, ret);
#endif
                return 0;
            }
        }
        // Fall through
    case 0x10: // As if user primary
    case 0x11: // As if user secondary
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x80: // Primary
    case 0x81: // Secondary
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if (cpu_hypervisor_mode(env)) {
                switch(size) {
                case 1:
                    ret = ldub_hypv(addr);
                    break;
                case 2:
                    ret = lduw_hypv(addr);
                    break;
                case 4:
                    ret = ldl_hypv(addr);
                    break;
                default:
                case 8:
                    ret = ldq_hypv(addr);
                    break;
                }
            } else {
                /* secondary space access has lowest asi bit equal to 1 */
                if (asi & 1) {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel_secondary(addr);
                        break;
                    case 2:
                        ret = lduw_kernel_secondary(addr);
                        break;
                    case 4:
                        ret = ldl_kernel_secondary(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel_secondary(addr);
                        break;
                    }
                } else {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel(addr);
                        break;
                    case 2:
                        ret = lduw_kernel(addr);
                        break;
                    case 4:
                        ret = ldl_kernel(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel(addr);
                        break;
                    }
                }
            }
        } else {
            /* secondary space access has lowest asi bit equal to 1 */
            if (asi & 1) {
                switch(size) {
                case 1:
                    ret = ldub_user_secondary(addr);
                    break;
                case 2:
                    ret = lduw_user_secondary(addr);
                    break;
                case 4:
                    ret = ldl_user_secondary(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user_secondary(addr);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    ret = ldub_user(addr);
                    break;
                case 2:
                    ret = lduw_user(addr);
                    break;
                case 4:
                    ret = ldl_user(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user(addr);
                    break;
                }
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        {
            switch(size) {
            case 1:
                ret = ldub_phys(addr);
                break;
            case 2:
                ret = lduw_phys(addr);
                break;
            case 4:
                ret = ldl_phys(addr);
                break;
            default:
            case 8:
                ret = ldq_phys(addr);
                break;
            }
            break;
        }
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        //  Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return 0;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    {
        switch(size) {
        case 1:
            ret = ldub_nucleus(addr);
            break;
        case 2:
            ret = lduw_nucleus(addr);
            break;
        case 4:
            ret = ldl_nucleus(addr);
            break;
        default:
        case 8:
            ret = ldq_nucleus(addr);
            break;
        }
        break;
    }
    case 0x4a: // UPA config
        // XXX
        break;
    case 0x45: // LSU
        ret = env->lsu;
        break;
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // I-TSB Tag Target register
                ret = ultrasparc_tag_target(env->immu.tag_access);
            } else {
                ret = env->immuregs[reg];
            }

            break;
        }
    case 0x51: // I-MMU 8k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         8*1024);
            break;
        }
    case 0x52: // I-MMU 64k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         64*1024);
            break;
        }
    case 0x55: // I-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tte;
            break;
        }
    case 0x56: // I-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tag;
            break;
        }
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // D-TSB Tag Target register
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
            } else {
                ret = env->dmmuregs[reg];
            }
            break;
        }
    case 0x59: // D-MMU 8k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         8*1024);
            break;
        }
    case 0x5a: // D-MMU 64k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         64*1024);
            break;
        }
    case 0x5d: // D-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tte;
            break;
        }
    case 0x5e: // D-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tag;
            break;
        }
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        break;
    case 0x5b: // D-MMU data pointer
    case 0x48: // Interrupt dispatch, RO
    case 0x49: // Interrupt data receive
    case 0x7f: // Incoming interrupt vector, RO
        // XXX
        break;
    case 0x54: // I-MMU data in, WO
    case 0x57: // I-MMU demap, WO
    case 0x5c: // D-MMU data in, WO
    case 0x5f: // D-MMU demap, WO
    case 0x77: // Interrupt vector, WO
    default:
        do_unassigned_access(addr, 0, 0, 1, size);
        ret = 0;
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2900

    
2901
/* Store 'val' of 'size' bytes to 'addr' in the address space named by
   'asi' (SPARC V9).  Handles privilege checking, little-endian ASI byte
   swapping, and the memory-mapped MMU/cache control registers.
   Fix: the DEBUG_MMU dump in the LSU case referenced the undefined
   identifier 'env1' (build break with DEBUG_MMU enabled); it now uses
   'env' like every other dump_mmu() call in this function. */
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif

    asi &= 0xff;

    /* ASIs < 0x80 are privileged; 0x30-0x7f additionally require
       hypervisor privilege when a hypervisor is present. */
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    /* Convert to little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
        /* fall through */
    default:
        break;
    }

    switch(asi) {
    case 0x10: // As if user primary
    case 0x11: // As if user secondary
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x80: // Primary
    case 0x81: // Secondary
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if (cpu_hypervisor_mode(env)) {
                switch(size) {
                case 1:
                    stb_hypv(addr, val);
                    break;
                case 2:
                    stw_hypv(addr, val);
                    break;
                case 4:
                    stl_hypv(addr, val);
                    break;
                case 8:
                default:
                    stq_hypv(addr, val);
                    break;
                }
            } else {
                /* secondary space access has lowest asi bit equal to 1 */
                if (asi & 1) {
                    switch(size) {
                    case 1:
                        stb_kernel_secondary(addr, val);
                        break;
                    case 2:
                        stw_kernel_secondary(addr, val);
                        break;
                    case 4:
                        stl_kernel_secondary(addr, val);
                        break;
                    case 8:
                    default:
                        stq_kernel_secondary(addr, val);
                        break;
                    }
                } else {
                    switch(size) {
                    case 1:
                        stb_kernel(addr, val);
                        break;
                    case 2:
                        stw_kernel(addr, val);
                        break;
                    case 4:
                        stl_kernel(addr, val);
                        break;
                    case 8:
                    default:
                        stq_kernel(addr, val);
                        break;
                    }
                }
            }
        } else {
            /* secondary space access has lowest asi bit equal to 1 */
            if (asi & 1) {
                switch(size) {
                case 1:
                    stb_user_secondary(addr, val);
                    break;
                case 2:
                    stw_user_secondary(addr, val);
                    break;
                case 4:
                    stl_user_secondary(addr, val);
                    break;
                case 8:
                default:
                    stq_user_secondary(addr, val);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    stb_user(addr, val);
                    break;
                case 2:
                    stw_user(addr, val);
                    break;
                case 4:
                    stl_user(addr, val);
                    break;
                case 8:
                default:
                    stq_user(addr, val);
                    break;
                }
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        {
            /* Bypass ASIs go straight to physical memory. */
            switch(size) {
            case 1:
                stb_phys(addr, val);
                break;
            case 2:
                stw_phys(addr, val);
                break;
            case 4:
                stl_phys(addr, val);
                break;
            case 8:
            default:
                stq_phys(addr, val);
                break;
            }
        }
        return;
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        //  Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    {
        switch(size) {
        case 1:
            stb_nucleus(addr, val);
            break;
        case 2:
            stw_nucleus(addr, val);
            break;
        case 4:
            stl_nucleus(addr, val);
            break;
        default:
        case 8:
            stq_nucleus(addr, val);
            break;
        }
        break;
    }

    case 0x4a: // UPA config
        // XXX
        return;
    case 0x45: // LSU
        {
            uint64_t oldreg;

            oldreg = env->lsu;
            env->lsu = val & (DMMU_E | IMMU_E);
            // Mappings generated during D/I MMU disabled mode are
            // invalid in normal mode
            if (oldreg != env->lsu) {
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
                            oldreg, env->lsu);
#ifdef DEBUG_MMU
                dump_mmu(stdout, fprintf, env);
#endif
                tlb_flush(env, 1);
            }
            return;
        }
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->immuregs[reg];
            switch(reg) {
            case 0: // RO
                return;
            case 1: // Not in I-MMU
            case 2:
                return;
            case 3: // SFSR
                if ((val & 1) == 0)
                    val = 0; // Clear SFSR
                env->immu.sfsr = val;
                break;
            case 4: // RO
                return;
            case 5: // TSB access
                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->immu.tsb, val);
                env->immu.tsb = val;
                break;
            case 6: // Tag access
                env->immu.tag_access = val;
                break;
            case 7:
            case 8:
                return;
            default:
                break;
            }

            if (oldreg != env->immuregs[reg]) {
                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case 0x54: // I-MMU data in
        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
        return;
    case 0x55: // I-MMU data access
        {
            // TODO: auto demap

            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case 0x57: // I-MMU demap
        demap_tlb(env->itlb, addr, "immu", env);
        return;
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->dmmuregs[reg];
            switch(reg) {
            case 0: // RO
            case 4:
                return;
            case 3: // SFSR
                if ((val & 1) == 0) {
                    val = 0; // Clear SFSR, Fault address
                    env->dmmu.sfar = 0;
                }
                env->dmmu.sfsr = val;
                break;
            case 1: // Primary context
                env->dmmu.mmu_primary_context = val;
                /* can be optimized to only flush MMU_USER_IDX
                   and MMU_KERNEL_IDX entries */
                tlb_flush(env, 1);
                break;
            case 2: // Secondary context
                env->dmmu.mmu_secondary_context = val;
                /* can be optimized to only flush MMU_USER_SECONDARY_IDX
                   and MMU_KERNEL_SECONDARY_IDX entries */
                tlb_flush(env, 1);
                break;
            case 5: // TSB access
                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->dmmu.tsb, val);
                env->dmmu.tsb = val;
                break;
            case 6: // Tag access
                env->dmmu.tag_access = val;
                break;
            case 7: // Virtual Watchpoint
            case 8: // Physical Watchpoint
            default:
                env->dmmuregs[reg] = val;
                break;
            }

            if (oldreg != env->dmmuregs[reg]) {
                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case 0x5c: // D-MMU data in
        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
        return;
    case 0x5d: // D-MMU data access
        {
            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
            dump_mmu(stdout, fprintf, env);
#endif
            return;
        }
    case 0x5f: // D-MMU demap
        demap_tlb(env->dtlb, addr, "dmmu", env);
        return;
    case 0x49: // Interrupt data receive
        // XXX
        return;
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        return;
    case 0x51: // I-MMU 8k TSB pointer, RO
    case 0x52: // I-MMU 64k TSB pointer, RO
    case 0x56: // I-MMU tag read, RO
    case 0x59: // D-MMU 8k TSB pointer, RO
    case 0x5a: // D-MMU 64k TSB pointer, RO
    case 0x5b: // D-MMU data pointer, RO
    case 0x5e: // D-MMU tag read, RO
    case 0x48: // Interrupt dispatch, RO
    case 0x7f: // Incoming interrupt vector, RO
    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}
3282
#endif /* CONFIG_USER_ONLY */
3283

    
3284
/* 64-bit 'ldda' ASI load: fills the even/odd register pair rd/rd+1.
   rd == 0 only writes %g1 (rd itself is the always-zero %g0); rd < 8
   targets the globals, otherwise the current-window registers. */
void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    /* Same privilege check as helper_ld_asi/helper_st_asi. */
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
#if !defined(CONFIG_USER_ONLY)
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        /* 128-bit atomic form: two 64-bit words, 16-byte aligned;
           the 0x2c variant is byte-swapped after loading. */
        helper_check_align(addr, 0xf);
        if (rd == 0) {
            env->gregs[1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_nucleus(addr);
            env->gregs[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            env->regwptr[rd] = ldq_nucleus(addr);
            env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
#endif
    default:
        /* Generic case: two 32-bit loads through helper_ld_asi, which
           performs any LE conversion itself. */
        helper_check_align(addr, 0x3);
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}
3334

    
3335
/* Floating-point ASI load of 'size' bytes (4/8/16) into the FP register
   file starting at rd.  Block-load ASIs transfer a full 64-byte block
   into 16 consecutive 32-bit FP registers. */
void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    CPU_DoubleU u;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xf0: /* UA2007/JPS1 Block load primary */
    case 0xf1: /* UA2007/JPS1 Block load secondary */
    case 0xf8: /* UA2007/JPS1 Block load primary LE */
    case 0xf9: /* UA2007/JPS1 Block load secondary LE */
        /* Block loads require a 64-byte-aligned address and rd
           a multiple of 8. */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x8f maps the block ASI onto the equivalent normal
           (LE-aware) memory ASI for the word-by-word transfer. */
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    case 0x16: /* UA2007 Block load primary, user privilege */
    case 0x17: /* UA2007 Block load secondary, user privilege */
    case 0x1e: /* UA2007 Block load primary LE, user privilege */
    case 0x1f: /* UA2007 Block load secondary LE, user privilege */
    case 0x70: /* JPS1 Block load primary, user privilege */
    case 0x71: /* JPS1 Block load secondary, user privilege */
    case 0x78: /* JPS1 Block load primary LE, user privilege */
    case 0x79: /* JPS1 Block load secondary LE, user privilege */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x19 maps onto the corresponding "as if user" ASI. */
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x19, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    /* Plain single/double/quad FP loads via the integer ASI helper. */
    switch(size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = helper_ld_asi(addr, asi, size, 0);
        break;
    case 8:
        u.ll = helper_ld_asi(addr, asi, size, 0);
        *((uint32_t *)&env->fpr[rd++]) = u.l.upper;
        *((uint32_t *)&env->fpr[rd++]) = u.l.lower;
        break;
    case 16:
        u.ll = helper_ld_asi(addr, asi, 8, 0);
        *((uint32_t *)&env->fpr[rd++]) = u.l.upper;
        *((uint32_t *)&env->fpr[rd++]) = u.l.lower;
        u.ll = helper_ld_asi(addr + 8, asi, 8, 0);
        *((uint32_t *)&env->fpr[rd++]) = u.l.upper;
        *((uint32_t *)&env->fpr[rd++]) = u.l.lower;
        break;
    }
}
3404

    
3405
/* Floating-point ASI store of 'size' bytes (4/8/16) from the FP register
   file starting at rd.  Block-store ASIs transfer 16 consecutive 32-bit
   FP registers as one 64-byte block. */
void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;
    CPU_DoubleU u;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */
    case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */
    case 0xf0: /* UA2007/JPS1 Block store primary */
    case 0xf1: /* UA2007/JPS1 Block store secondary */
    case 0xf8: /* UA2007/JPS1 Block store primary LE */
    case 0xf9: /* UA2007/JPS1 Block store secondary LE */
        /* Block stores require a 64-byte-aligned address and rd
           a multiple of 8. */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x8f maps the block ASI onto the equivalent normal
           (LE-aware) memory ASI for the word-by-word transfer. */
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    case 0x16: /* UA2007 Block store primary, user privilege */
    case 0x17: /* UA2007 Block store secondary, user privilege */
    case 0x1e: /* UA2007 Block store primary LE, user privilege */
    case 0x1f: /* UA2007 Block store secondary LE, user privilege */
    case 0x70: /* JPS1 Block store primary, user privilege */
    case 0x71: /* JPS1 Block store secondary, user privilege */
    case 0x78: /* JPS1 Block store primary LE, user privilege */
    case 0x79: /* JPS1 Block store secondary LE, user privilege */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x19 maps onto the corresponding "as if user" ASI. */
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x19, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    /* Plain single/double/quad FP stores via the integer ASI helper. */
    switch(size) {
    default:
    case 4:
        helper_st_asi(addr, *(uint32_t *)&env->fpr[rd], asi, size);
        break;
    case 8:
        u.l.upper = *(uint32_t *)&env->fpr[rd++];
        u.l.lower = *(uint32_t *)&env->fpr[rd++];
        helper_st_asi(addr, u.ll, asi, size);
        break;
    case 16:
        u.l.upper = *(uint32_t *)&env->fpr[rd++];
        u.l.lower = *(uint32_t *)&env->fpr[rd++];
        helper_st_asi(addr, u.ll, asi, 8);
        u.l.upper = *(uint32_t *)&env->fpr[rd++];
        u.l.lower = *(uint32_t *)&env->fpr[rd++];
        helper_st_asi(addr + 8, u.ll, asi, 8);
        break;
    }
}
3477

    
3478
/* 32-bit compare-and-swap through an ASI: store the low word of val1
   only when the current memory word equals the low word of val2.
   Returns the (zero-extended) value that was in memory. */
target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
                            target_ulong val2, uint32_t asi)
{
    target_ulong current;

    current = helper_ld_asi(addr, asi, 4, 0) & 0xffffffffUL;
    if (current == (val2 & 0xffffffffUL)) {
        helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
    }
    return current;
}
3490

    
3491
/* 64-bit compare-and-swap through an ASI: store val1 only when memory
   currently holds val2.  Returns the value that was in memory. */
target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
                             target_ulong val2, uint32_t asi)
{
    target_ulong current;

    current = helper_ld_asi(addr, asi, 8, 0);
    if (current == val2) {
        helper_st_asi(addr, val1, asi, 8);
    }
    return current;
}
3501
#endif /* TARGET_SPARC64 */
3502

    
3503
#ifndef TARGET_SPARC64
3504
void helper_rett(void)
3505
{
3506
    unsigned int cwp;
3507

    
3508
    if (env->psret == 1)
3509
        raise_exception(TT_ILL_INSN);
3510

    
3511
    env->psret = 1;
3512
    cwp = cwp_inc(env->cwp + 1) ;
3513
    if (env->wim & (1 << cwp)) {
3514
        raise_exception(TT_WIN_UNF);
3515
    }
3516
    set_cwp(cwp);
3517
    env->psrs = env->psrps;
3518
}
3519
#endif
3520

    
3521
/* Unsigned 64/32 division for SPARC udiv/udivcc: the dividend is
   Y:rs1 (64 bits), the divisor the low 32 bits of b.  The quotient is
   clamped to 0xffffffff with the overflow flag set; division by zero
   traps.  When cc is set, the condition-code state is updated. */
static target_ulong helper_udiv_common(target_ulong a, target_ulong b, int cc)
{
    uint64_t dividend;
    uint32_t divisor;
    int overflow = 0;

    dividend = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    divisor = (b & 0xffffffff);

    if (divisor == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    dividend = dividend / divisor;
    if (dividend > 0xffffffff) {
        /* quotient does not fit in 32 bits: saturate */
        dividend = 0xffffffff;
        overflow = 1;
    }

    if (cc) {
        env->cc_dst = dividend;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return dividend;
}
3547

    
3548
/* udiv: unsigned division without condition-code update. */
target_ulong helper_udiv(target_ulong a, target_ulong b)
{
    return helper_udiv_common(a, b, 0);
}
3552

    
3553
/* udivcc: unsigned division that also updates the condition codes. */
target_ulong helper_udiv_cc(target_ulong a, target_ulong b)
{
    return helper_udiv_common(a, b, 1);
}
3557

    
3558
/* Signed 64/32 division for SPARC sdiv/sdivcc: the dividend is Y:rs1
   (64 bits), the divisor the sign-extended low 32 bits of b.  The
   quotient saturates to INT32_MIN/INT32_MAX with overflow set; division
   by zero traps.  When cc is set, the condition codes are updated.
   Fix: INT64_MIN / -1 previously invoked C undefined behavior (signed
   overflow); that case is now handled explicitly — the true quotient
   (+2^63) cannot fit in 32 bits, so it saturates positive. */
static target_ulong helper_sdiv_common(target_ulong a, target_ulong b, int cc)
{
    int overflow = 0;
    int64_t x0;
    int32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = (b & 0xffffffff);

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    if (x1 == -1 && (uint64_t)x0 == 0x8000000000000000ULL) {
        /* INT64_MIN / -1 overflows int64_t (UB in C); saturate. */
        x0 = 0x7fffffff;
        overflow = 1;
    } else {
        x0 = x0 / x1;
        if ((int32_t) x0 != x0) {
            x0 = x0 < 0 ? 0x80000000: 0x7fffffff;
            overflow = 1;
        }
    }

    if (cc) {
        env->cc_dst = x0;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return x0;
}
3584

    
3585
/* sdiv: signed division without condition-code update. */
target_ulong helper_sdiv(target_ulong a, target_ulong b)
{
    return helper_sdiv_common(a, b, 0);
}
3589

    
3590
/* sdivcc: signed division that also updates the condition codes. */
target_ulong helper_sdiv_cc(target_ulong a, target_ulong b)
{
    return helper_sdiv_common(a, b, 1);
}
3594

    
3595
/* Store the 64-bit FP value DT0 to memory via the translation regime
   selected by mem_idx; traps on a misaligned address. */
void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        stfq_user(addr, DT0);
        break;
    case MMU_KERNEL_IDX:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        /* unexpected MMU index: log and drop the store */
        DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    stfq_raw(address_mask(env, addr), DT0);
#endif
}
3619

    
3620
/* Load a 64-bit FP value into DT0 via the translation regime selected
   by mem_idx; traps on a misaligned address. */
void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        DT0 = ldfq_user(addr);
        break;
    case MMU_KERNEL_IDX:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        /* unexpected MMU index: log and leave DT0 unchanged */
        DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    DT0 = ldfq_raw(address_mask(env, addr));
#endif
}
3644

    
3645
/* Load a 128-bit FP value into QT0 as two 64-bit halves via the
   translation regime selected by mem_idx.  Note only 8-byte alignment
   is enforced (see the XXX: no true 128-bit access). */
void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case MMU_KERNEL_IDX:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        /* unexpected MMU index: log and leave QT0 unchanged */
        DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.ll.upper = ldq_raw(address_mask(env, addr));
    u.ll.lower = ldq_raw(address_mask(env, addr + 8));
    QT0 = u.q;
#endif
}
3680

    
3681
/* Store the 128-bit FP value QT0 as two 64-bit halves via the
   translation regime selected by mem_idx.  Note only 8-byte alignment
   is enforced (see the XXX: no true 128-bit access). */
void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case MMU_KERNEL_IDX:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        /* unexpected MMU index: log and drop the store */
        DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.q = QT0;
    stq_raw(address_mask(env, addr), u.ll.upper);
    stq_raw(address_mask(env, addr + 8), u.ll.lower);
#endif
}
3716

    
3717
static inline void set_fsr(void)
3718
{
3719
    int rnd_mode;
3720

    
3721
    switch (env->fsr & FSR_RD_MASK) {
3722
    case FSR_RD_NEAREST:
3723
        rnd_mode = float_round_nearest_even;
3724
        break;
3725
    default:
3726
    case FSR_RD_ZERO:
3727
        rnd_mode = float_round_to_zero;
3728
        break;
3729
    case FSR_RD_POS:
3730
        rnd_mode = float_round_up;
3731
        break;
3732
    case FSR_RD_NEG:
3733
        rnd_mode = float_round_down;
3734
        break;
3735
    }
3736
    set_float_rounding_mode(rnd_mode, &env->fp_status);
3737
}
3738

    
3739
/* Write the 32-bit FSR: merge the software-writable bits of new_fsr
   with the preserved bits of the old value, then refresh the softfloat
   rounding mode. */
void helper_ldfsr(uint32_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
    set_fsr();
}
3744

    
3745
#ifdef TARGET_SPARC64
3746
/* V9 64-bit FSR write: same merge-and-refresh as helper_ldfsr but with
   the wider ldxfsr masks. */
void helper_ldxfsr(uint64_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
    set_fsr();
}
3751
#endif
3752

    
3753
/* Raise a debug exception and unwind to the main CPU execution loop
   (used for breakpoints/single-step). */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}
3758

    
3759
#ifndef TARGET_SPARC64
3760
/* XXX: use another pointer for %iN registers to avoid slow wrapping
3761
   handling ? */
3762
void helper_save(void)
3763
{
3764
    uint32_t cwp;
3765

    
3766
    cwp = cwp_dec(env->cwp - 1);
3767
    if (env->wim & (1 << cwp)) {
3768
        raise_exception(TT_WIN_OVF);
3769
    }
3770
    set_cwp(cwp);
3771
}
3772

    
3773
void helper_restore(void)
3774
{
3775
    uint32_t cwp;
3776

    
3777
    cwp = cwp_inc(env->cwp + 1);
3778
    if (env->wim & (1 << cwp)) {
3779
        raise_exception(TT_WIN_UNF);
3780
    }
3781
    set_cwp(cwp);
3782
}
3783

    
3784
/* V8 'wr %psr': reject CWP values beyond the implemented number of
   register windows, otherwise commit the new PSR. */
void helper_wrpsr(target_ulong new_psr)
{
    if ((new_psr & PSR_CWP) >= env->nwindows) {
        raise_exception(TT_ILL_INSN);
        return;
    }
    cpu_put_psr(env, new_psr);
}
3792

    
3793
/* V8 'rd %psr': return the current processor state register. */
target_ulong helper_rdpsr(void)
{
    return get_psr();
}
3797

    
3798
#else
3799
/* XXX: use another pointer for %iN registers to avoid slow wrapping
3800
   handling ? */
3801
/* V9 'save': decrement CANSAVE / increment CANRESTORE after rotating
   down one window.  Raises a spill trap (vectored through WSTATE, or
   the "other" variant when OTHERWIN windows exist) when no windows can
   be saved, and a clean-window trap when no clean window is available. */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cwp_dec(env->cwp - 1);
    if (env->cansave == 0) {
        /* spill trap: TT selects the normal or "other" handler and
           encodes the relevant WSTATE field in the trap type */
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            // XXX Clean windows without trap
            raise_exception(TT_CLRWIN);
        } else {
            env->cansave--;
            env->canrestore++;
            set_cwp(cwp);
        }
    }
}
3821

    
3822
/* V9 'restore': increment CANSAVE / decrement CANRESTORE after rotating
   up one window.  Raises a fill trap (normal or "other" variant,
   vectored through WSTATE) when no windows can be restored. */
void helper_restore(void)
{
    uint32_t cwp;

    cwp = cwp_inc(env->cwp + 1);
    if (env->canrestore == 0) {
        /* fill trap: TT selects the normal or "other" handler and
           encodes the relevant WSTATE field in the trap type */
        raise_exception(TT_FILL | (env->otherwin != 0 ?
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                   ((env->wstate & 0x7) << 2)));
    } else {
        env->cansave++;
        env->canrestore--;
        set_cwp(cwp);
    }
}
3837

    
3838
/* V9 'flushw': if any window other than the current one holds valid
   data (cansave != nwindows - 2), raise a spill trap so software can
   flush the remaining windows. */
void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}
3846

    
3847
void helper_saved(void)
3848
{
3849
    env->cansave++;
3850
    if (env->otherwin == 0)
3851
        env->canrestore--;
3852
    else
3853
        env->otherwin--;
3854
}
3855

    
3856
void helper_restored(void)
3857
{
3858
    env->canrestore++;
3859
    if (env->cleanwin < env->nwindows - 1)
3860
        env->cleanwin++;
3861
    if (env->otherwin == 0)
3862
        env->cansave--;
3863
    else
3864
        env->otherwin--;
3865
}
3866

    
3867
static target_ulong get_ccr(void)
3868
{
3869
    target_ulong psr;
3870

    
3871
    psr = get_psr();
3872

    
3873
    return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
3874
}
3875

    
3876
/* External entry point for reading the CCR of a given CPU: temporarily
   swaps in env1 as the global env around the get_ccr() call. */
target_ulong cpu_get_ccr(CPUState *env1)
{
    CPUState *previous = env;
    target_ulong ccr;

    env = env1;
    ccr = get_ccr();
    env = previous;
    return ccr;
}
3887

    
3888
/* Unpack a V9 CCR value: bits 7:4 go to the xcc flags and bits 3:0 to
   the icc flags, both stored at bits 23:20 of their registers.  Reset
   CC_OP since the flags are now held explicitly. */
static void put_ccr(target_ulong val)
{
    env->xcc = (val >> 4) << 20;
    env->psr = (val & 0xf) << 20;
    CC_OP = CC_OP_FLAGS;
}
3896

    
3897
/* External wrapper for put_ccr(): operate on env1 while preserving
   the caller's global env. */
void cpu_put_ccr(CPUState *env1, target_ulong val)
{
    CPUState *prev_env = env;

    env = env1;
    put_ccr(val);
    env = prev_env;
}
3906

    
3907
/* Return the CWP in V9 numbering.  Internally the V8 ordering is
   used, and the V9 window number runs in the opposite direction. */
static target_ulong get_cwp64(void)
{
    return env->nwindows - 1 - env->cwp;
}
3911

    
3912
/* External wrapper for get_cwp64(): read env1's V9-numbered CWP while
   preserving the caller's global env. */
target_ulong cpu_get_cwp64(CPUState *env1)
{
    CPUState *prev_env = env;
    target_ulong cwp;

    env = env1;
    cwp = get_cwp64();
    env = prev_env;
    return cwp;
}
3923

    
3924
static void put_cwp64(int cwp)
3925
{
3926
    if (unlikely(cwp >= env->nwindows || cwp < 0)) {
3927
        cwp %= env->nwindows;
3928
    }
3929
    set_cwp(env->nwindows - 1 - cwp);
3930
}
3931

    
3932
/* External wrapper for put_cwp64(): operate on env1 while preserving
   the caller's global env. */
void cpu_put_cwp64(CPUState *env1, int cwp)
{
    CPUState *prev_env = env;

    env = env1;
    put_cwp64(cwp);
    env = prev_env;
}
3941

    
3942
/* RDCCR: read the condition-code register. */
target_ulong helper_rdccr(void)
{
    return get_ccr();
}
3946

    
3947
/* WRCCR: write the condition-code register. */
void helper_wrccr(target_ulong new_ccr)
{
    put_ccr(new_ccr);
}
3951

    
3952
// CWP handling is reversed in V9, but we still use the V8 register
// order.
/* RDPR %cwp: read the window pointer in V9 numbering. */
target_ulong helper_rdcwp(void)
{
    return get_cwp64();
}
3958

    
3959
/* WRPR %cwp: write the window pointer from a V9-numbered value. */
void helper_wrcwp(target_ulong new_cwp)
{
    put_cwp64(new_cwp);
}
3963

    
3964
// This function uses non-native bit order: FROM/TO count from the
// most-significant bit (bit 0 == 2^63), extracting bits FROM..TO
// inclusive, right-justified.
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
// (least-significant); it just mirrors the indices for GET_FIELD.
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))
3971

    
3972
/* VIS ARRAY8: convert packed 3-D (x, y, z) fixed-point coordinates in
   pixel_addr into a blocked-addressing byte offset; cubesize scales
   the block size.  Bit fields follow the UltraSPARC manual's ARRAY
   instruction layout — NOTE(review): field positions assumed to match
   the manual; verify against the ARRAY8 definition before changing. */
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}
3984

    
3985
/* VIS ALIGNADDR: compute addr + offset, record the low three bits in
   GSR.align (for subsequent FALIGNDATA), and return the sum rounded
   down to an 8-byte boundary. */
target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
{
    uint64_t sum = addr + offset;

    env->gsr = (env->gsr & ~7ULL) | (sum & 7ULL);
    return sum & ~7ULL;
}
3994

    
3995
/* POPC: population count (number of set bits) of the source. */
target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}
3999

    
4000
/* Return the global-register bank selected by the AG/MG/IG bits of
   pstate.  An unrecognized combination logs an error and deliberately
   falls through to the normal (bank 0) globals. */
static inline uint64_t *get_gregset(uint32_t pstate)
{
    switch (pstate) {
    default:
        DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
                pstate,
                (pstate & PS_IG) ? " IG" : "",
                (pstate & PS_MG) ? " MG" : "",
                (pstate & PS_AG) ? " AG" : "");
        /* pass through to normal set of global registers */
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}
4020

    
4021
/* Install a new PSTATE value, swapping the active global-register
   bank when the bank-selecting bits (AG/MG/IG) change. */
static inline void change_pstate(uint32_t new_pstate)
{
    uint32_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    if (env->def->features & CPU_FEATURE_GL) {
        // PS_AG is not implemented in this case
        new_pstate &= ~PS_AG;
    }

    /* 0xc01 masks the global-register bank selector bits (IG/MG/AG). */
    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;

    if (new_pstate_regs != pstate_regs) {
        DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
                       pstate_regs, new_pstate_regs);
        // Switch global register bank: stash the live globals into the
        // outgoing bank, then load the incoming bank into env->gregs.
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    }
    else {
        DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
                       new_pstate_regs);
    }
    env->pstate = new_pstate;
}
4049

    
4050
/* WRPR %pstate: only the architecturally writable bits (0xf3f) are
   kept.  A change to interrupt enables may unmask pending IRQs, so
   re-check them in system emulation. */
void helper_wrpstate(target_ulong new_state)
{
    change_pstate(new_state & 0xf3f);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4060

    
4061
/* External wrapper for change_pstate(): operate on env1 while
   preserving the caller's global env. */
void cpu_change_pstate(CPUState *env1, uint32_t new_pstate)
{
    CPUState *prev_env = env;

    env = env1;
    change_pstate(new_pstate);
    env = prev_env;
}
4070

    
4071
/* WRPR %pil: set the processor interrupt level.  Lowering PIL may
   unmask pending interrupts, so re-check them.  No-op in user-mode
   emulation. */
void helper_wrpil(target_ulong new_pil)
{
#if !defined(CONFIG_USER_ONLY)
    DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
                   env->psrpil, (uint32_t)new_pil);

    env->psrpil = new_pil;

    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4084

    
4085
/* DONE: return from trap, resuming after the trapped instruction
   (at TNPC).  Restores CCR/ASI/PSTATE/CWP from TSTATE and pops one
   trap level. */
void helper_done(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tnpc;
    env->npc = tsptr->tnpc + 4;
    /* Unpack TSTATE: CCR from bits 39:32, ASI from 31:24, PSTATE from
       bits 19:8 (masked to the writable 0xf3f), CWP from the low byte. */
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    /* Restoring PSTATE may re-enable pending interrupts. */
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4105

    
4106
/* RETRY: return from trap, re-executing the trapped instruction
   (PC = TPC, NPC = TNPC).  Restores CCR/ASI/PSTATE/CWP from TSTATE
   and pops one trap level. */
void helper_retry(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tpc;
    env->npc = tsptr->tnpc;
    /* Unpack TSTATE: CCR from bits 39:32, ASI from 31:24, PSTATE from
       bits 19:8 (masked to the writable 0xf3f), CWP from the low byte. */
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    /* Restoring PSTATE may re-enable pending interrupts. */
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4126

    
4127
/* Install a new SOFTINT value if it differs from the current one and
   re-check interrupts, since new soft interrupts may now be pending.
   'operation' names the caller for the debug trace. */
static void do_modify_softint(const char* operation, uint32_t value)
{
    if (env->softint == value) {
        return;                 /* unchanged: nothing to do */
    }
    env->softint = value;
    DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4139

    
4140
/* WR %set_softint: OR the given bits into SOFTINT. */
void helper_set_softint(uint64_t value)
{
    do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
}
4144

    
4145
/* WR %clear_softint: clear the given bits in SOFTINT. */
void helper_clear_softint(uint64_t value)
{
    do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
}
4149

    
4150
/* WR %softint: replace SOFTINT with the given value. */
void helper_write_softint(uint64_t value)
{
    do_modify_softint("helper_write_softint", (uint32_t)value);
}
4154
#endif
4155

    
4156
#ifdef TARGET_SPARC64
4157
/* Return the trap-state entry for the current trap level. */
trap_state* cpu_tsptr(CPUState* env)
{
    return &env->ts[env->tl & MAXTL_MASK];
}
4161
#endif
4162

    
4163
#if !defined(CONFIG_USER_ONLY)
4164

    
4165
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
4166
                                void *retaddr);
4167

    
4168
#define MMUSUFFIX _mmu
4169
#define ALIGNED_ONLY
4170

    
4171
#define SHIFT 0
4172
#include "softmmu_template.h"
4173

    
4174
#define SHIFT 1
4175
#include "softmmu_template.h"
4176

    
4177
#define SHIFT 2
4178
#include "softmmu_template.h"
4179

    
4180
#define SHIFT 3
4181
#include "softmmu_template.h"
4182

    
4183
/* XXX: make it generic ? */
4184
static void cpu_restore_state2(void *retaddr)
4185
{
4186
    TranslationBlock *tb;
4187
    unsigned long pc;
4188

    
4189
    if (retaddr) {
4190
        /* now we have a real cpu fault */
4191
        pc = (unsigned long)retaddr;
4192
        tb = tb_find_pc(pc);
4193
        if (tb) {
4194
            /* the PC is inside the translated code. It means that we have
4195
               a virtual CPU fault */
4196
            cpu_restore_state(tb, env, pc);
4197
        }
4198
    }
4199
}
4200

    
4201
/* Softmmu callback for misaligned accesses: recover the guest state
   at the faulting instruction, then raise the alignment trap. */
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}
4211

    
4212
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        /* MMU fault: resync guest state to the faulting instruction
           and longjmp back to the CPU loop to deliver the exception. */
        cpu_restore_state2(retaddr);
        cpu_loop_exit(env);
    }
    env = saved_env;
}
4233

    
4234
#endif /* !CONFIG_USER_ONLY */
4235

    
4236
#ifndef TARGET_SPARC64
4237
#if !defined(CONFIG_USER_ONLY)
4238
/* SPARC32: handle an access to unassigned memory.  Updates the MMU
   fault status/address registers and, when the MMU is enabled and not
   in no-fault mode, raises the corresponding access exception. */
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;
    int fault_type;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
    /* Don't overwrite translation and access faults */
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
    if ((fault_type > 4) || (fault_type == 0)) {
        env->mmuregs[3] = 0; /* Fault status register */
        if (is_asi)
            env->mmuregs[3] |= 1 << 16;
        if (env->psrs)
            env->mmuregs[3] |= 1 << 5;
        if (is_exec)
            env->mmuregs[3] |= 1 << 6;
        if (is_write)
            env->mmuregs[3] |= 1 << 7;
        /* fault type 5 (translation error), FAV bit */
        env->mmuregs[3] |= (5 << 2) | 2;
        /* SuperSPARC will never place instruction fault addresses in the FAR */
        if (!is_exec) {
            env->mmuregs[4] = addr; /* Fault address register */
        }
    }
    /* overflow (same type fault was not read before another fault) */
    if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
        env->mmuregs[3] |= 1;
    }

    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }

    /* flush neverland mappings created during no-fault mode,
       so the sequential MMU faults report proper fault types */
    if (env->mmuregs[0] & MMU_NF) {
        tlb_flush(env, 1);
    }

    env = saved_env;
}
4298
#endif
4299
#else
4300
#if defined(CONFIG_USER_ONLY)
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                          int is_asi, int size)
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
#endif
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
#endif

    /* SPARC64: no V8-style fault status registers to update; raise
       the matching access exception directly. */
    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);

    env = saved_env;
}
4327
#endif
4328

    
4329

    
4330
#ifdef TARGET_SPARC64
4331
/* Write the tick counter; no-op in user-mode emulation, which has no
   tick timer. */
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}
4337

    
4338
/* Read the tick counter; user-mode emulation has no tick timer and
   always reads zero. */
uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}
4346

    
4347
/* Write the tick compare/limit register; no-op in user-mode
   emulation, which has no tick timer. */
void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
4353
#endif