Statistics
| Branch: | Revision:

root / target-sparc / op_helper.c @ 54a3c0f0

History | View | Annotate | Download (113.9 kB)

1
#include "exec.h"
2
#include "host-utils.h"
3
#include "helper.h"
4

    
5
//#define DEBUG_MMU
6
//#define DEBUG_MXCC
7
//#define DEBUG_UNALIGNED
8
//#define DEBUG_UNASSIGNED
9
//#define DEBUG_ASI
10
//#define DEBUG_PCALL
11
//#define DEBUG_PSTATE
12

    
13
#ifdef DEBUG_MMU
14
#define DPRINTF_MMU(fmt, ...)                                   \
15
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
16
#else
17
#define DPRINTF_MMU(fmt, ...) do {} while (0)
18
#endif
19

    
20
#ifdef DEBUG_MXCC
21
#define DPRINTF_MXCC(fmt, ...)                                  \
22
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
23
#else
24
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
25
#endif
26

    
27
#ifdef DEBUG_ASI
28
#define DPRINTF_ASI(fmt, ...)                                   \
29
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
30
#endif
31

    
32
#ifdef DEBUG_PSTATE
33
#define DPRINTF_PSTATE(fmt, ...)                                   \
34
    do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
35
#else
36
#define DPRINTF_PSTATE(fmt, ...) do {} while (0)
37
#endif
38

    
39
#ifdef TARGET_SPARC64
40
#ifndef TARGET_ABI32
41
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
42
#else
43
#define AM_CHECK(env1) (1)
44
#endif
45
#endif
46

    
47
#define DT0 (env->dt0)
48
#define DT1 (env->dt1)
49
#define QT0 (env->qt0)
50
#define QT1 (env->qt1)
51

    
52
#if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
53
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
54
                          int is_asi, int size);
55
#endif
56

    
57
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
58
// Calculates TSB pointer value for fault page size 8k or 64k
59
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
60
                                       uint64_t tag_access_register,
61
                                       int page_size)
62
{
63
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
64
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
65
    int tsb_size  = tsb_register & 0xf;
66

    
67
    // discard lower 13 bits which hold tag access context
68
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
69

    
70
    // now reorder bits
71
    uint64_t tsb_base_mask = ~0x1fffULL;
72
    uint64_t va = tag_access_va;
73

    
74
    // move va bits to correct position
75
    if (page_size == 8*1024) {
76
        va >>= 9;
77
    } else if (page_size == 64*1024) {
78
        va >>= 12;
79
    }
80

    
81
    if (tsb_size) {
82
        tsb_base_mask <<= tsb_size;
83
    }
84

    
85
    // calculate tsb_base mask and adjust va if split is in use
86
    if (tsb_split) {
87
        if (page_size == 8*1024) {
88
            va &= ~(1ULL << (13 + tsb_size));
89
        } else if (page_size == 64*1024) {
90
            va |= (1ULL << (13 + tsb_size));
91
        }
92
        tsb_base_mask <<= 1;
93
    }
94

    
95
    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
96
}
97

    
98
// Calculates tag target register value by reordering bits
99
// in tag access register
100
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
101
{
102
    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
103
}
104

    
105
static void replace_tlb_entry(SparcTLBEntry *tlb,
106
                              uint64_t tlb_tag, uint64_t tlb_tte,
107
                              CPUState *env1)
108
{
109
    target_ulong mask, size, va, offset;
110

    
111
    // flush page range if translation is valid
112
    if (TTE_IS_VALID(tlb->tte)) {
113

    
114
        mask = 0xffffffffffffe000ULL;
115
        mask <<= 3 * ((tlb->tte >> 61) & 3);
116
        size = ~mask + 1;
117

    
118
        va = tlb->tag & mask;
119

    
120
        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
121
            tlb_flush_page(env1, va + offset);
122
        }
123
    }
124

    
125
    tlb->tag = tlb_tag;
126
    tlb->tte = tlb_tte;
127
}
128

    
129
static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
130
                      const char* strmmu, CPUState *env1)
131
{
132
    unsigned int i;
133
    target_ulong mask;
134
    uint64_t context;
135

    
136
    int is_demap_context = (demap_addr >> 6) & 1;
137

    
138
    // demap context
139
    switch ((demap_addr >> 4) & 3) {
140
    case 0: // primary
141
        context = env1->dmmu.mmu_primary_context;
142
        break;
143
    case 1: // secondary
144
        context = env1->dmmu.mmu_secondary_context;
145
        break;
146
    case 2: // nucleus
147
        context = 0;
148
        break;
149
    case 3: // reserved
150
    default:
151
        return;
152
    }
153

    
154
    for (i = 0; i < 64; i++) {
155
        if (TTE_IS_VALID(tlb[i].tte)) {
156

    
157
            if (is_demap_context) {
158
                // will remove non-global entries matching context value
159
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
160
                    !tlb_compare_context(&tlb[i], context)) {
161
                    continue;
162
                }
163
            } else {
164
                // demap page
165
                // will remove any entry matching VA
166
                mask = 0xffffffffffffe000ULL;
167
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);
168

    
169
                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
170
                    continue;
171
                }
172

    
173
                // entry should be global or matching context value
174
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
175
                    !tlb_compare_context(&tlb[i], context)) {
176
                    continue;
177
                }
178
            }
179

    
180
            replace_tlb_entry(&tlb[i], 0, 0, env1);
181
#ifdef DEBUG_MMU
182
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
183
            dump_mmu(env1);
184
#endif
185
        }
186
    }
187
}
188

    
189
static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
190
                                 uint64_t tlb_tag, uint64_t tlb_tte,
191
                                 const char* strmmu, CPUState *env1)
192
{
193
    unsigned int i, replace_used;
194

    
195
    // Try replacing invalid entry
196
    for (i = 0; i < 64; i++) {
197
        if (!TTE_IS_VALID(tlb[i].tte)) {
198
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
199
#ifdef DEBUG_MMU
200
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
201
            dump_mmu(env1);
202
#endif
203
            return;
204
        }
205
    }
206

    
207
    // All entries are valid, try replacing unlocked entry
208

    
209
    for (replace_used = 0; replace_used < 2; ++replace_used) {
210

    
211
        // Used entries are not replaced on first pass
212

    
213
        for (i = 0; i < 64; i++) {
214
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
215

    
216
                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
217
#ifdef DEBUG_MMU
218
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
219
                            strmmu, (replace_used?"used":"unused"), i);
220
                dump_mmu(env1);
221
#endif
222
                return;
223
            }
224
        }
225

    
226
        // Now reset used bit and search for unused entries again
227

    
228
        for (i = 0; i < 64; i++) {
229
            TTE_SET_UNUSED(tlb[i].tte);
230
        }
231
    }
232

    
233
#ifdef DEBUG_MMU
234
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
235
#endif
236
    // error state?
237
}
238

    
239
#endif
240

    
241
static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
242
{
243
#ifdef TARGET_SPARC64
244
    if (AM_CHECK(env1))
245
        addr &= 0xffffffffULL;
246
#endif
247
    return addr;
248
}
249

    
250
static void raise_exception(int tt)
251
{
252
    env->exception_index = tt;
253
    cpu_loop_exit();
254
}
255

    
256
void HELPER(raise_exception)(int tt)
257
{
258
    raise_exception(tt);
259
}
260

    
261
void helper_check_align(target_ulong addr, uint32_t align)
262
{
263
    if (addr & align) {
264
#ifdef DEBUG_UNALIGNED
265
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
266
           "\n", addr, env->pc);
267
#endif
268
        raise_exception(TT_UNALIGNED);
269
    }
270
}
271

    
272
#define F_HELPER(name, p) void helper_f##name##p(void)
273

    
274
#define F_BINOP(name)                                           \
275
    float32 helper_f ## name ## s (float32 src1, float32 src2)  \
276
    {                                                           \
277
        return float32_ ## name (src1, src2, &env->fp_status);  \
278
    }                                                           \
279
    F_HELPER(name, d)                                           \
280
    {                                                           \
281
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);     \
282
    }                                                           \
283
    F_HELPER(name, q)                                           \
284
    {                                                           \
285
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);    \
286
    }
287

    
288
F_BINOP(add);
289
F_BINOP(sub);
290
F_BINOP(mul);
291
F_BINOP(div);
292
#undef F_BINOP
293

    
294
void helper_fsmuld(float32 src1, float32 src2)
295
{
296
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
297
                      float32_to_float64(src2, &env->fp_status),
298
                      &env->fp_status);
299
}
300

    
301
void helper_fdmulq(void)
302
{
303
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
304
                       float64_to_float128(DT1, &env->fp_status),
305
                       &env->fp_status);
306
}
307

    
308
float32 helper_fnegs(float32 src)
309
{
310
    return float32_chs(src);
311
}
312

    
313
#ifdef TARGET_SPARC64
314
F_HELPER(neg, d)
315
{
316
    DT0 = float64_chs(DT1);
317
}
318

    
319
F_HELPER(neg, q)
320
{
321
    QT0 = float128_chs(QT1);
322
}
323
#endif
324

    
325
/* Integer to float conversion.  */
326
float32 helper_fitos(int32_t src)
327
{
328
    return int32_to_float32(src, &env->fp_status);
329
}
330

    
331
void helper_fitod(int32_t src)
332
{
333
    DT0 = int32_to_float64(src, &env->fp_status);
334
}
335

    
336
void helper_fitoq(int32_t src)
337
{
338
    QT0 = int32_to_float128(src, &env->fp_status);
339
}
340

    
341
#ifdef TARGET_SPARC64
342
float32 helper_fxtos(void)
343
{
344
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
345
}
346

    
347
F_HELPER(xto, d)
348
{
349
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
350
}
351

    
352
F_HELPER(xto, q)
353
{
354
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
355
}
356
#endif
357
#undef F_HELPER
358

    
359
/* floating point conversion */
360
float32 helper_fdtos(void)
361
{
362
    return float64_to_float32(DT1, &env->fp_status);
363
}
364

    
365
void helper_fstod(float32 src)
366
{
367
    DT0 = float32_to_float64(src, &env->fp_status);
368
}
369

    
370
float32 helper_fqtos(void)
371
{
372
    return float128_to_float32(QT1, &env->fp_status);
373
}
374

    
375
void helper_fstoq(float32 src)
376
{
377
    QT0 = float32_to_float128(src, &env->fp_status);
378
}
379

    
380
void helper_fqtod(void)
381
{
382
    DT0 = float128_to_float64(QT1, &env->fp_status);
383
}
384

    
385
void helper_fdtoq(void)
386
{
387
    QT0 = float64_to_float128(DT1, &env->fp_status);
388
}
389

    
390
/* Float to integer conversion.  */
391
int32_t helper_fstoi(float32 src)
392
{
393
    return float32_to_int32_round_to_zero(src, &env->fp_status);
394
}
395

    
396
int32_t helper_fdtoi(void)
397
{
398
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
399
}
400

    
401
int32_t helper_fqtoi(void)
402
{
403
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
404
}
405

    
406
#ifdef TARGET_SPARC64
407
void helper_fstox(float32 src)
408
{
409
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
410
}
411

    
412
void helper_fdtox(void)
413
{
414
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
415
}
416

    
417
void helper_fqtox(void)
418
{
419
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
420
}
421

    
422
void helper_faligndata(void)
423
{
424
    uint64_t tmp;
425

    
426
    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
427
    /* on many architectures a shift of 64 does nothing */
428
    if ((env->gsr & 7) != 0) {
429
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
430
    }
431
    *((uint64_t *)&DT0) = tmp;
432
}
433

    
434
#ifdef HOST_WORDS_BIGENDIAN
435
#define VIS_B64(n) b[7 - (n)]
436
#define VIS_W64(n) w[3 - (n)]
437
#define VIS_SW64(n) sw[3 - (n)]
438
#define VIS_L64(n) l[1 - (n)]
439
#define VIS_B32(n) b[3 - (n)]
440
#define VIS_W32(n) w[1 - (n)]
441
#else
442
#define VIS_B64(n) b[n]
443
#define VIS_W64(n) w[n]
444
#define VIS_SW64(n) sw[n]
445
#define VIS_L64(n) l[n]
446
#define VIS_B32(n) b[n]
447
#define VIS_W32(n) w[n]
448
#endif
449

    
450
typedef union {
451
    uint8_t b[8];
452
    uint16_t w[4];
453
    int16_t sw[4];
454
    uint32_t l[2];
455
    float64 d;
456
} vis64;
457

    
458
typedef union {
459
    uint8_t b[4];
460
    uint16_t w[2];
461
    uint32_t l;
462
    float32 f;
463
} vis32;
464

    
465
void helper_fpmerge(void)
466
{
467
    vis64 s, d;
468

    
469
    s.d = DT0;
470
    d.d = DT1;
471

    
472
    // Reverse calculation order to handle overlap
473
    d.VIS_B64(7) = s.VIS_B64(3);
474
    d.VIS_B64(6) = d.VIS_B64(3);
475
    d.VIS_B64(5) = s.VIS_B64(2);
476
    d.VIS_B64(4) = d.VIS_B64(2);
477
    d.VIS_B64(3) = s.VIS_B64(1);
478
    d.VIS_B64(2) = d.VIS_B64(1);
479
    d.VIS_B64(1) = s.VIS_B64(0);
480
    //d.VIS_B64(0) = d.VIS_B64(0);
481

    
482
    DT0 = d.d;
483
}
484

    
485
void helper_fmul8x16(void)
486
{
487
    vis64 s, d;
488
    uint32_t tmp;
489

    
490
    s.d = DT0;
491
    d.d = DT1;
492

    
493
#define PMUL(r)                                                 \
494
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
495
    if ((tmp & 0xff) > 0x7f)                                    \
496
        tmp += 0x100;                                           \
497
    d.VIS_W64(r) = tmp >> 8;
498

    
499
    PMUL(0);
500
    PMUL(1);
501
    PMUL(2);
502
    PMUL(3);
503
#undef PMUL
504

    
505
    DT0 = d.d;
506
}
507

    
508
void helper_fmul8x16al(void)
509
{
510
    vis64 s, d;
511
    uint32_t tmp;
512

    
513
    s.d = DT0;
514
    d.d = DT1;
515

    
516
#define PMUL(r)                                                 \
517
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
518
    if ((tmp & 0xff) > 0x7f)                                    \
519
        tmp += 0x100;                                           \
520
    d.VIS_W64(r) = tmp >> 8;
521

    
522
    PMUL(0);
523
    PMUL(1);
524
    PMUL(2);
525
    PMUL(3);
526
#undef PMUL
527

    
528
    DT0 = d.d;
529
}
530

    
531
void helper_fmul8x16au(void)
532
{
533
    vis64 s, d;
534
    uint32_t tmp;
535

    
536
    s.d = DT0;
537
    d.d = DT1;
538

    
539
#define PMUL(r)                                                 \
540
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
541
    if ((tmp & 0xff) > 0x7f)                                    \
542
        tmp += 0x100;                                           \
543
    d.VIS_W64(r) = tmp >> 8;
544

    
545
    PMUL(0);
546
    PMUL(1);
547
    PMUL(2);
548
    PMUL(3);
549
#undef PMUL
550

    
551
    DT0 = d.d;
552
}
553

    
554
void helper_fmul8sux16(void)
555
{
556
    vis64 s, d;
557
    uint32_t tmp;
558

    
559
    s.d = DT0;
560
    d.d = DT1;
561

    
562
#define PMUL(r)                                                         \
563
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
564
    if ((tmp & 0xff) > 0x7f)                                            \
565
        tmp += 0x100;                                                   \
566
    d.VIS_W64(r) = tmp >> 8;
567

    
568
    PMUL(0);
569
    PMUL(1);
570
    PMUL(2);
571
    PMUL(3);
572
#undef PMUL
573

    
574
    DT0 = d.d;
575
}
576

    
577
void helper_fmul8ulx16(void)
578
{
579
    vis64 s, d;
580
    uint32_t tmp;
581

    
582
    s.d = DT0;
583
    d.d = DT1;
584

    
585
#define PMUL(r)                                                         \
586
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
587
    if ((tmp & 0xff) > 0x7f)                                            \
588
        tmp += 0x100;                                                   \
589
    d.VIS_W64(r) = tmp >> 8;
590

    
591
    PMUL(0);
592
    PMUL(1);
593
    PMUL(2);
594
    PMUL(3);
595
#undef PMUL
596

    
597
    DT0 = d.d;
598
}
599

    
600
void helper_fmuld8sux16(void)
601
{
602
    vis64 s, d;
603
    uint32_t tmp;
604

    
605
    s.d = DT0;
606
    d.d = DT1;
607

    
608
#define PMUL(r)                                                         \
609
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
610
    if ((tmp & 0xff) > 0x7f)                                            \
611
        tmp += 0x100;                                                   \
612
    d.VIS_L64(r) = tmp;
613

    
614
    // Reverse calculation order to handle overlap
615
    PMUL(1);
616
    PMUL(0);
617
#undef PMUL
618

    
619
    DT0 = d.d;
620
}
621

    
622
void helper_fmuld8ulx16(void)
623
{
624
    vis64 s, d;
625
    uint32_t tmp;
626

    
627
    s.d = DT0;
628
    d.d = DT1;
629

    
630
#define PMUL(r)                                                         \
631
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
632
    if ((tmp & 0xff) > 0x7f)                                            \
633
        tmp += 0x100;                                                   \
634
    d.VIS_L64(r) = tmp;
635

    
636
    // Reverse calculation order to handle overlap
637
    PMUL(1);
638
    PMUL(0);
639
#undef PMUL
640

    
641
    DT0 = d.d;
642
}
643

    
644
void helper_fexpand(void)
645
{
646
    vis32 s;
647
    vis64 d;
648

    
649
    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
650
    d.d = DT1;
651
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
652
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
653
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
654
    d.VIS_W64(3) = s.VIS_B32(3) << 4;
655

    
656
    DT0 = d.d;
657
}
658

    
659
#define VIS_HELPER(name, F)                             \
660
    void name##16(void)                                 \
661
    {                                                   \
662
        vis64 s, d;                                     \
663
                                                        \
664
        s.d = DT0;                                      \
665
        d.d = DT1;                                      \
666
                                                        \
667
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));   \
668
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));   \
669
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));   \
670
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));   \
671
                                                        \
672
        DT0 = d.d;                                      \
673
    }                                                   \
674
                                                        \
675
    uint32_t name##16s(uint32_t src1, uint32_t src2)    \
676
    {                                                   \
677
        vis32 s, d;                                     \
678
                                                        \
679
        s.l = src1;                                     \
680
        d.l = src2;                                     \
681
                                                        \
682
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));   \
683
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));   \
684
                                                        \
685
        return d.l;                                     \
686
    }                                                   \
687
                                                        \
688
    void name##32(void)                                 \
689
    {                                                   \
690
        vis64 s, d;                                     \
691
                                                        \
692
        s.d = DT0;                                      \
693
        d.d = DT1;                                      \
694
                                                        \
695
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));   \
696
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));   \
697
                                                        \
698
        DT0 = d.d;                                      \
699
    }                                                   \
700
                                                        \
701
    uint32_t name##32s(uint32_t src1, uint32_t src2)    \
702
    {                                                   \
703
        vis32 s, d;                                     \
704
                                                        \
705
        s.l = src1;                                     \
706
        d.l = src2;                                     \
707
                                                        \
708
        d.l = F(d.l, s.l);                              \
709
                                                        \
710
        return d.l;                                     \
711
    }
712

    
713
#define FADD(a, b) ((a) + (b))
714
#define FSUB(a, b) ((a) - (b))
715
VIS_HELPER(helper_fpadd, FADD)
716
VIS_HELPER(helper_fpsub, FSUB)
717

    
718
#define VIS_CMPHELPER(name, F)                                        \
719
    void name##16(void)                                           \
720
    {                                                             \
721
        vis64 s, d;                                               \
722
                                                                  \
723
        s.d = DT0;                                                \
724
        d.d = DT1;                                                \
725
                                                                  \
726
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0;       \
727
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0;      \
728
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0;      \
729
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0;      \
730
                                                                  \
731
        DT0 = d.d;                                                \
732
    }                                                             \
733
                                                                  \
734
    void name##32(void)                                           \
735
    {                                                             \
736
        vis64 s, d;                                               \
737
                                                                  \
738
        s.d = DT0;                                                \
739
        d.d = DT1;                                                \
740
                                                                  \
741
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0;       \
742
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0;      \
743
                                                                  \
744
        DT0 = d.d;                                                \
745
    }
746

    
747
#define FCMPGT(a, b) ((a) > (b))
748
#define FCMPEQ(a, b) ((a) == (b))
749
#define FCMPLE(a, b) ((a) <= (b))
750
#define FCMPNE(a, b) ((a) != (b))
751

    
752
VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
753
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
754
VIS_CMPHELPER(helper_fcmple, FCMPLE)
755
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
756
#endif
757

    
758
void helper_check_ieee_exceptions(void)
759
{
760
    target_ulong status;
761

    
762
    status = get_float_exception_flags(&env->fp_status);
763
    if (status) {
764
        /* Copy IEEE 754 flags into FSR */
765
        if (status & float_flag_invalid)
766
            env->fsr |= FSR_NVC;
767
        if (status & float_flag_overflow)
768
            env->fsr |= FSR_OFC;
769
        if (status & float_flag_underflow)
770
            env->fsr |= FSR_UFC;
771
        if (status & float_flag_divbyzero)
772
            env->fsr |= FSR_DZC;
773
        if (status & float_flag_inexact)
774
            env->fsr |= FSR_NXC;
775

    
776
        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
777
            /* Unmasked exception, generate a trap */
778
            env->fsr |= FSR_FTT_IEEE_EXCP;
779
            raise_exception(TT_FP_EXCP);
780
        } else {
781
            /* Accumulate exceptions */
782
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
783
        }
784
    }
785
}
786

    
787
void helper_clear_float_exceptions(void)
788
{
789
    set_float_exception_flags(0, &env->fp_status);
790
}
791

    
792
float32 helper_fabss(float32 src)
793
{
794
    return float32_abs(src);
795
}
796

    
797
#ifdef TARGET_SPARC64
798
void helper_fabsd(void)
799
{
800
    DT0 = float64_abs(DT1);
801
}
802

    
803
void helper_fabsq(void)
804
{
805
    QT0 = float128_abs(QT1);
806
}
807
#endif
808

    
809
float32 helper_fsqrts(float32 src)
810
{
811
    return float32_sqrt(src, &env->fp_status);
812
}
813

    
814
void helper_fsqrtd(void)
815
{
816
    DT0 = float64_sqrt(DT1, &env->fp_status);
817
}
818

    
819
void helper_fsqrtq(void)
820
{
821
    QT0 = float128_sqrt(QT1, &env->fp_status);
822
}
823

    
824
#define GEN_FCMP(name, size, reg1, reg2, FS, TRAP)                      \
825
    void glue(helper_, name) (void)                                     \
826
    {                                                                   \
827
        target_ulong new_fsr;                                           \
828
                                                                        \
829
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                     \
830
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) {   \
831
        case float_relation_unordered:                                  \
832
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                      \
833
            if ((env->fsr & FSR_NVM) || TRAP) {                         \
834
                env->fsr |= new_fsr;                                    \
835
                env->fsr |= FSR_NVC;                                    \
836
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
837
                raise_exception(TT_FP_EXCP);                            \
838
            } else {                                                    \
839
                env->fsr |= FSR_NVA;                                    \
840
            }                                                           \
841
            break;                                                      \
842
        case float_relation_less:                                       \
843
            new_fsr = FSR_FCC0 << FS;                                   \
844
            break;                                                      \
845
        case float_relation_greater:                                    \
846
            new_fsr = FSR_FCC1 << FS;                                   \
847
            break;                                                      \
848
        default:                                                        \
849
            new_fsr = 0;                                                \
850
            break;                                                      \
851
        }                                                               \
852
        env->fsr |= new_fsr;                                            \
853
    }
854
#define GEN_FCMPS(name, size, FS, TRAP)                                 \
855
    void glue(helper_, name)(float32 src1, float32 src2)                \
856
    {                                                                   \
857
        target_ulong new_fsr;                                           \
858
                                                                        \
859
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                     \
860
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) {   \
861
        case float_relation_unordered:                                  \
862
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                      \
863
            if ((env->fsr & FSR_NVM) || TRAP) {                         \
864
                env->fsr |= new_fsr;                                    \
865
                env->fsr |= FSR_NVC;                                    \
866
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
867
                raise_exception(TT_FP_EXCP);                            \
868
            } else {                                                    \
869
                env->fsr |= FSR_NVA;                                    \
870
            }                                                           \
871
            break;                                                      \
872
        case float_relation_less:                                       \
873
            new_fsr = FSR_FCC0 << FS;                                   \
874
            break;                                                      \
875
        case float_relation_greater:                                    \
876
            new_fsr = FSR_FCC1 << FS;                                   \
877
            break;                                                      \
878
        default:                                                        \
879
            new_fsr = 0;                                                \
880
            break;                                                      \
881
        }                                                               \
882
        env->fsr |= new_fsr;                                            \
883
    }
884

    
885
GEN_FCMPS(fcmps, float32, 0, 0);
886
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
887

    
888
GEN_FCMPS(fcmpes, float32, 0, 1);
889
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
890

    
891
GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
892
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
893

    
894
static uint32_t compute_all_flags(void)
895
{
896
    return env->psr & PSR_ICC;
897
}
898

    
899
static uint32_t compute_C_flags(void)
900
{
901
    return env->psr & PSR_CARRY;
902
}
903

    
904
static inline uint32_t get_NZ_icc(int32_t dst)
905
{
906
    uint32_t ret = 0;
907

    
908
    if (dst == 0) {
909
        ret = PSR_ZERO;
910
    } else if (dst < 0) {
911
        ret = PSR_NEG;
912
    }
913
    return ret;
914
}
915

    
916
#ifdef TARGET_SPARC64
917
static uint32_t compute_all_flags_xcc(void)
918
{
919
    return env->xcc & PSR_ICC;
920
}
921

    
922
static uint32_t compute_C_flags_xcc(void)
923
{
924
    return env->xcc & PSR_CARRY;
925
}
926

    
927
static inline uint32_t get_NZ_xcc(target_long dst)
928
{
929
    uint32_t ret = 0;
930

    
931
    if (!dst) {
932
        ret = PSR_ZERO;
933
    } else if (dst < 0) {
934
        ret = PSR_NEG;
935
    }
936
    return ret;
937
}
938
#endif
939

    
940
static inline uint32_t get_V_div_icc(target_ulong src2)
941
{
942
    uint32_t ret = 0;
943

    
944
    if (src2 != 0) {
945
        ret = PSR_OVF;
946
    }
947
    return ret;
948
}
949

    
950
static uint32_t compute_all_div(void)
951
{
952
    uint32_t ret;
953

    
954
    ret = get_NZ_icc(CC_DST);
955
    ret |= get_V_div_icc(CC_SRC2);
956
    return ret;
957
}
958

    
959
static uint32_t compute_C_div(void)
960
{
961
    return 0;
962
}
963

    
964
static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
965
{
966
    uint32_t ret = 0;
967

    
968
    if (dst < src1) {
969
        ret = PSR_CARRY;
970
    }
971
    return ret;
972
}
973

    
974
static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
975
                                      uint32_t src2)
976
{
977
    uint32_t ret = 0;
978

    
979
    if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
980
        ret = PSR_CARRY;
981
    }
982
    return ret;
983
}
984

    
985
static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
986
                                     uint32_t src2)
987
{
988
    uint32_t ret = 0;
989

    
990
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
991
        ret = PSR_OVF;
992
    }
993
    return ret;
994
}
995

    
996
#ifdef TARGET_SPARC64
997
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
998
{
999
    uint32_t ret = 0;
1000

    
1001
    if (dst < src1) {
1002
        ret = PSR_CARRY;
1003
    }
1004
    return ret;
1005
}
1006

    
1007
static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
1008
                                      target_ulong src2)
1009
{
1010
    uint32_t ret = 0;
1011

    
1012
    if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
1013
        ret = PSR_CARRY;
1014
    }
1015
    return ret;
1016
}
1017

    
1018
static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
1019
                                         target_ulong src2)
1020
{
1021
    uint32_t ret = 0;
1022

    
1023
    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
1024
        ret = PSR_OVF;
1025
    }
1026
    return ret;
1027
}
1028

    
1029
static uint32_t compute_all_add_xcc(void)
1030
{
1031
    uint32_t ret;
1032

    
1033
    ret = get_NZ_xcc(CC_DST);
1034
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
1035
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
1036
    return ret;
1037
}
1038

    
1039
static uint32_t compute_C_add_xcc(void)
1040
{
1041
    return get_C_add_xcc(CC_DST, CC_SRC);
1042
}
1043
#endif
1044

    
1045
static uint32_t compute_all_add(void)
1046
{
1047
    uint32_t ret;
1048

    
1049
    ret = get_NZ_icc(CC_DST);
1050
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1051
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1052
    return ret;
1053
}
1054

    
1055
static uint32_t compute_C_add(void)
1056
{
1057
    return get_C_add_icc(CC_DST, CC_SRC);
1058
}
1059

    
1060
#ifdef TARGET_SPARC64
1061
static uint32_t compute_all_addx_xcc(void)
1062
{
1063
    uint32_t ret;
1064

    
1065
    ret = get_NZ_xcc(CC_DST);
1066
    ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
1067
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
1068
    return ret;
1069
}
1070

    
1071
static uint32_t compute_C_addx_xcc(void)
1072
{
1073
    uint32_t ret;
1074

    
1075
    ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
1076
    return ret;
1077
}
1078
#endif
1079

    
1080
static uint32_t compute_all_addx(void)
1081
{
1082
    uint32_t ret;
1083

    
1084
    ret = get_NZ_icc(CC_DST);
1085
    ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1086
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1087
    return ret;
1088
}
1089

    
1090
static uint32_t compute_C_addx(void)
1091
{
1092
    uint32_t ret;
1093

    
1094
    ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
1095
    return ret;
1096
}
1097

    
1098
static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
1099
{
1100
    uint32_t ret = 0;
1101

    
1102
    if ((src1 | src2) & 0x3) {
1103
        ret = PSR_OVF;
1104
    }
1105
    return ret;
1106
}
1107

    
1108
static uint32_t compute_all_tadd(void)
1109
{
1110
    uint32_t ret;
1111

    
1112
    ret = get_NZ_icc(CC_DST);
1113
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1114
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1115
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1116
    return ret;
1117
}
1118

    
1119
static uint32_t compute_all_taddtv(void)
1120
{
1121
    uint32_t ret;
1122

    
1123
    ret = get_NZ_icc(CC_DST);
1124
    ret |= get_C_add_icc(CC_DST, CC_SRC);
1125
    return ret;
1126
}
1127

    
1128
static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
1129
{
1130
    uint32_t ret = 0;
1131

    
1132
    if (src1 < src2) {
1133
        ret = PSR_CARRY;
1134
    }
1135
    return ret;
1136
}
1137

    
1138
static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
1139
                                      uint32_t src2)
1140
{
1141
    uint32_t ret = 0;
1142

    
1143
    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
1144
        ret = PSR_CARRY;
1145
    }
1146
    return ret;
1147
}
1148

    
1149
static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
1150
                                     uint32_t src2)
1151
{
1152
    uint32_t ret = 0;
1153

    
1154
    if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
1155
        ret = PSR_OVF;
1156
    }
1157
    return ret;
1158
}
1159

    
1160

    
1161
#ifdef TARGET_SPARC64
1162
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
1163
{
1164
    uint32_t ret = 0;
1165

    
1166
    if (src1 < src2) {
1167
        ret = PSR_CARRY;
1168
    }
1169
    return ret;
1170
}
1171

    
1172
static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
1173
                                      target_ulong src2)
1174
{
1175
    uint32_t ret = 0;
1176

    
1177
    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
1178
        ret = PSR_CARRY;
1179
    }
1180
    return ret;
1181
}
1182

    
1183
static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
1184
                                     target_ulong src2)
1185
{
1186
    uint32_t ret = 0;
1187

    
1188
    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
1189
        ret = PSR_OVF;
1190
    }
1191
    return ret;
1192
}
1193

    
1194
static uint32_t compute_all_sub_xcc(void)
1195
{
1196
    uint32_t ret;
1197

    
1198
    ret = get_NZ_xcc(CC_DST);
1199
    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
1200
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1201
    return ret;
1202
}
1203

    
1204
static uint32_t compute_C_sub_xcc(void)
1205
{
1206
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
1207
}
1208
#endif
1209

    
1210
static uint32_t compute_all_sub(void)
1211
{
1212
    uint32_t ret;
1213

    
1214
    ret = get_NZ_icc(CC_DST);
1215
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1216
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1217
    return ret;
1218
}
1219

    
1220
static uint32_t compute_C_sub(void)
1221
{
1222
    return get_C_sub_icc(CC_SRC, CC_SRC2);
1223
}
1224

    
1225
#ifdef TARGET_SPARC64
1226
static uint32_t compute_all_subx_xcc(void)
1227
{
1228
    uint32_t ret;
1229

    
1230
    ret = get_NZ_xcc(CC_DST);
1231
    ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
1232
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
1233
    return ret;
1234
}
1235

    
1236
static uint32_t compute_C_subx_xcc(void)
1237
{
1238
    uint32_t ret;
1239

    
1240
    ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
1241
    return ret;
1242
}
1243
#endif
1244

    
1245
static uint32_t compute_all_subx(void)
1246
{
1247
    uint32_t ret;
1248

    
1249
    ret = get_NZ_icc(CC_DST);
1250
    ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1251
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1252
    return ret;
1253
}
1254

    
1255
static uint32_t compute_C_subx(void)
1256
{
1257
    uint32_t ret;
1258

    
1259
    ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
1260
    return ret;
1261
}
1262

    
1263
static uint32_t compute_all_tsub(void)
1264
{
1265
    uint32_t ret;
1266

    
1267
    ret = get_NZ_icc(CC_DST);
1268
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1269
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1270
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1271
    return ret;
1272
}
1273

    
1274
static uint32_t compute_all_tsubtv(void)
1275
{
1276
    uint32_t ret;
1277

    
1278
    ret = get_NZ_icc(CC_DST);
1279
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
1280
    return ret;
1281
}
1282

    
1283
static uint32_t compute_all_logic(void)
1284
{
1285
    return get_NZ_icc(CC_DST);
1286
}
1287

    
1288
static uint32_t compute_C_logic(void)
1289
{
1290
    return 0;
1291
}
1292

    
1293
#ifdef TARGET_SPARC64
1294
static uint32_t compute_all_logic_xcc(void)
1295
{
1296
    return get_NZ_xcc(CC_DST);
1297
}
1298
#endif
1299

    
1300
typedef struct CCTable {
1301
    uint32_t (*compute_all)(void); /* return all the flags */
1302
    uint32_t (*compute_c)(void);  /* return the C flag */
1303
} CCTable;
1304

    
1305
static const CCTable icc_table[CC_OP_NB] = {
1306
    /* CC_OP_DYNAMIC should never happen */
1307
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
1308
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
1309
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
1310
    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
1311
    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
1312
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
1313
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
1314
    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
1315
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
1316
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
1317
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
1318
};
1319

    
1320
#ifdef TARGET_SPARC64
1321
static const CCTable xcc_table[CC_OP_NB] = {
1322
    /* CC_OP_DYNAMIC should never happen */
1323
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
1324
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
1325
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
1326
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
1327
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
1328
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
1329
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1330
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
1331
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1332
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
1333
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
1334
};
1335
#endif
1336

    
1337
void helper_compute_psr(void)
1338
{
1339
    uint32_t new_psr;
1340

    
1341
    new_psr = icc_table[CC_OP].compute_all();
1342
    env->psr = new_psr;
1343
#ifdef TARGET_SPARC64
1344
    new_psr = xcc_table[CC_OP].compute_all();
1345
    env->xcc = new_psr;
1346
#endif
1347
    CC_OP = CC_OP_FLAGS;
1348
}
1349

    
1350
uint32_t helper_compute_C_icc(void)
1351
{
1352
    uint32_t ret;
1353

    
1354
    ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
1355
    return ret;
1356
}
1357

    
1358
static inline void memcpy32(target_ulong *dst, const target_ulong *src)
1359
{
1360
    dst[0] = src[0];
1361
    dst[1] = src[1];
1362
    dst[2] = src[2];
1363
    dst[3] = src[3];
1364
    dst[4] = src[4];
1365
    dst[5] = src[5];
1366
    dst[6] = src[6];
1367
    dst[7] = src[7];
1368
}
1369

    
1370
static void set_cwp(int new_cwp)
1371
{
1372
    /* put the modified wrap registers at their proper location */
1373
    if (env->cwp == env->nwindows - 1) {
1374
        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
1375
    }
1376
    env->cwp = new_cwp;
1377

    
1378
    /* put the wrap registers at their temporary location */
1379
    if (new_cwp == env->nwindows - 1) {
1380
        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
1381
    }
1382
    env->regwptr = env->regbase + (new_cwp * 16);
1383
}
1384

    
1385
void cpu_set_cwp(CPUState *env1, int new_cwp)
1386
{
1387
    CPUState *saved_env;
1388

    
1389
    saved_env = env;
1390
    env = env1;
1391
    set_cwp(new_cwp);
1392
    env = saved_env;
1393
}
1394

    
1395
static target_ulong get_psr(void)
1396
{
1397
    helper_compute_psr();
1398

    
1399
#if !defined (TARGET_SPARC64)
1400
    return env->version | (env->psr & PSR_ICC) |
1401
        (env->psref? PSR_EF : 0) |
1402
        (env->psrpil << 8) |
1403
        (env->psrs? PSR_S : 0) |
1404
        (env->psrps? PSR_PS : 0) |
1405
        (env->psret? PSR_ET : 0) | env->cwp;
1406
#else
1407
    return env->psr & PSR_ICC;
1408
#endif
1409
}
1410

    
1411
target_ulong cpu_get_psr(CPUState *env1)
1412
{
1413
    CPUState *saved_env;
1414
    target_ulong ret;
1415

    
1416
    saved_env = env;
1417
    env = env1;
1418
    ret = get_psr();
1419
    env = saved_env;
1420
    return ret;
1421
}
1422

    
1423
static void put_psr(target_ulong val)
1424
{
1425
    env->psr = val & PSR_ICC;
1426
#if !defined (TARGET_SPARC64)
1427
    env->psref = (val & PSR_EF)? 1 : 0;
1428
    env->psrpil = (val & PSR_PIL) >> 8;
1429
#endif
1430
#if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
1431
    cpu_check_irqs(env);
1432
#endif
1433
#if !defined (TARGET_SPARC64)
1434
    env->psrs = (val & PSR_S)? 1 : 0;
1435
    env->psrps = (val & PSR_PS)? 1 : 0;
1436
    env->psret = (val & PSR_ET)? 1 : 0;
1437
    set_cwp(val & PSR_CWP);
1438
#endif
1439
    env->cc_op = CC_OP_FLAGS;
1440
}
1441

    
1442
void cpu_put_psr(CPUState *env1, target_ulong val)
1443
{
1444
    CPUState *saved_env;
1445

    
1446
    saved_env = env;
1447
    env = env1;
1448
    put_psr(val);
1449
    env = saved_env;
1450
}
1451

    
1452
static int cwp_inc(int cwp)
1453
{
1454
    if (unlikely(cwp >= env->nwindows)) {
1455
        cwp -= env->nwindows;
1456
    }
1457
    return cwp;
1458
}
1459

    
1460
int cpu_cwp_inc(CPUState *env1, int cwp)
1461
{
1462
    CPUState *saved_env;
1463
    target_ulong ret;
1464

    
1465
    saved_env = env;
1466
    env = env1;
1467
    ret = cwp_inc(cwp);
1468
    env = saved_env;
1469
    return ret;
1470
}
1471

    
1472
static int cwp_dec(int cwp)
1473
{
1474
    if (unlikely(cwp < 0)) {
1475
        cwp += env->nwindows;
1476
    }
1477
    return cwp;
1478
}
1479

    
1480
int cpu_cwp_dec(CPUState *env1, int cwp)
1481
{
1482
    CPUState *saved_env;
1483
    target_ulong ret;
1484

    
1485
    saved_env = env;
1486
    env = env1;
1487
    ret = cwp_dec(cwp);
1488
    env = saved_env;
1489
    return ret;
1490
}
1491

    
1492
#ifdef TARGET_SPARC64
1493
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
1494
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
1495
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
1496

    
1497
GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
1498
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
1499
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
1500

    
1501
GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
1502
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
1503
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
1504

    
1505
GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
1506
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
1507
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
1508

    
1509
GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
1510
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
1511
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
1512

    
1513
GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
1514
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
1515
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
1516
#endif
1517
#undef GEN_FCMPS
1518

    
1519
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
1520
    defined(DEBUG_MXCC)
1521
static void dump_mxcc(CPUState *env)
1522
{
1523
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1524
           "\n",
1525
           env->mxccdata[0], env->mxccdata[1],
1526
           env->mxccdata[2], env->mxccdata[3]);
1527
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1528
           "\n"
1529
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
1530
           "\n",
1531
           env->mxccregs[0], env->mxccregs[1],
1532
           env->mxccregs[2], env->mxccregs[3],
1533
           env->mxccregs[4], env->mxccregs[5],
1534
           env->mxccregs[6], env->mxccregs[7]);
1535
}
1536
#endif
1537

    
1538
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
1539
    && defined(DEBUG_ASI)
1540
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
1541
                     uint64_t r1)
1542
{
1543
    switch (size)
1544
    {
1545
    case 1:
1546
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
1547
                    addr, asi, r1 & 0xff);
1548
        break;
1549
    case 2:
1550
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
1551
                    addr, asi, r1 & 0xffff);
1552
        break;
1553
    case 4:
1554
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
1555
                    addr, asi, r1 & 0xffffffff);
1556
        break;
1557
    case 8:
1558
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
1559
                    addr, asi, r1);
1560
        break;
1561
    }
1562
}
1563
#endif
1564

    
1565
#ifndef TARGET_SPARC64
1566
#ifndef CONFIG_USER_ONLY
1567
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1568
{
1569
    uint64_t ret = 0;
1570
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
1571
    uint32_t last_addr = addr;
1572
#endif
1573

    
1574
    helper_check_align(addr, size - 1);
1575
    switch (asi) {
1576
    case 2: /* SuperSparc MXCC registers */
1577
        switch (addr) {
1578
        case 0x01c00a00: /* MXCC control register */
1579
            if (size == 8)
1580
                ret = env->mxccregs[3];
1581
            else
1582
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1583
                             size);
1584
            break;
1585
        case 0x01c00a04: /* MXCC control register */
1586
            if (size == 4)
1587
                ret = env->mxccregs[3];
1588
            else
1589
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1590
                             size);
1591
            break;
1592
        case 0x01c00c00: /* Module reset register */
1593
            if (size == 8) {
1594
                ret = env->mxccregs[5];
1595
                // should we do something here?
1596
            } else
1597
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1598
                             size);
1599
            break;
1600
        case 0x01c00f00: /* MBus port address register */
1601
            if (size == 8)
1602
                ret = env->mxccregs[7];
1603
            else
1604
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1605
                             size);
1606
            break;
1607
        default:
1608
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1609
                         size);
1610
            break;
1611
        }
1612
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
1613
                     "addr = %08x -> ret = %" PRIx64 ","
1614
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
1615
#ifdef DEBUG_MXCC
1616
        dump_mxcc(env);
1617
#endif
1618
        break;
1619
    case 3: /* MMU probe */
1620
        {
1621
            int mmulev;
1622

    
1623
            mmulev = (addr >> 8) & 15;
1624
            if (mmulev > 4)
1625
                ret = 0;
1626
            else
1627
                ret = mmu_probe(env, addr, mmulev);
1628
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
1629
                        addr, mmulev, ret);
1630
        }
1631
        break;
1632
    case 4: /* read MMU regs */
1633
        {
1634
            int reg = (addr >> 8) & 0x1f;
1635

    
1636
            ret = env->mmuregs[reg];
1637
            if (reg == 3) /* Fault status cleared on read */
1638
                env->mmuregs[3] = 0;
1639
            else if (reg == 0x13) /* Fault status read */
1640
                ret = env->mmuregs[3];
1641
            else if (reg == 0x14) /* Fault address read */
1642
                ret = env->mmuregs[4];
1643
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
1644
        }
1645
        break;
1646
    case 5: // Turbosparc ITLB Diagnostic
1647
    case 6: // Turbosparc DTLB Diagnostic
1648
    case 7: // Turbosparc IOTLB Diagnostic
1649
        break;
1650
    case 9: /* Supervisor code access */
1651
        switch(size) {
1652
        case 1:
1653
            ret = ldub_code(addr);
1654
            break;
1655
        case 2:
1656
            ret = lduw_code(addr);
1657
            break;
1658
        default:
1659
        case 4:
1660
            ret = ldl_code(addr);
1661
            break;
1662
        case 8:
1663
            ret = ldq_code(addr);
1664
            break;
1665
        }
1666
        break;
1667
    case 0xa: /* User data access */
1668
        switch(size) {
1669
        case 1:
1670
            ret = ldub_user(addr);
1671
            break;
1672
        case 2:
1673
            ret = lduw_user(addr);
1674
            break;
1675
        default:
1676
        case 4:
1677
            ret = ldl_user(addr);
1678
            break;
1679
        case 8:
1680
            ret = ldq_user(addr);
1681
            break;
1682
        }
1683
        break;
1684
    case 0xb: /* Supervisor data access */
1685
        switch(size) {
1686
        case 1:
1687
            ret = ldub_kernel(addr);
1688
            break;
1689
        case 2:
1690
            ret = lduw_kernel(addr);
1691
            break;
1692
        default:
1693
        case 4:
1694
            ret = ldl_kernel(addr);
1695
            break;
1696
        case 8:
1697
            ret = ldq_kernel(addr);
1698
            break;
1699
        }
1700
        break;
1701
    case 0xc: /* I-cache tag */
1702
    case 0xd: /* I-cache data */
1703
    case 0xe: /* D-cache tag */
1704
    case 0xf: /* D-cache data */
1705
        break;
1706
    case 0x20: /* MMU passthrough */
1707
        switch(size) {
1708
        case 1:
1709
            ret = ldub_phys(addr);
1710
            break;
1711
        case 2:
1712
            ret = lduw_phys(addr);
1713
            break;
1714
        default:
1715
        case 4:
1716
            ret = ldl_phys(addr);
1717
            break;
1718
        case 8:
1719
            ret = ldq_phys(addr);
1720
            break;
1721
        }
1722
        break;
1723
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1724
        switch(size) {
1725
        case 1:
1726
            ret = ldub_phys((target_phys_addr_t)addr
1727
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1728
            break;
1729
        case 2:
1730
            ret = lduw_phys((target_phys_addr_t)addr
1731
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
1732
            break;
1733
        default:
1734
        case 4:
1735
            ret = ldl_phys((target_phys_addr_t)addr
1736
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1737
            break;
1738
        case 8:
1739
            ret = ldq_phys((target_phys_addr_t)addr
1740
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
1741
            break;
1742
        }
1743
        break;
1744
    case 0x30: // Turbosparc secondary cache diagnostic
1745
    case 0x31: // Turbosparc RAM snoop
1746
    case 0x32: // Turbosparc page table descriptor diagnostic
1747
    case 0x39: /* data cache diagnostic register */
1748
        ret = 0;
1749
        break;
1750
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
1751
        {
1752
            int reg = (addr >> 8) & 3;
1753

    
1754
            switch(reg) {
1755
            case 0: /* Breakpoint Value (Addr) */
1756
                ret = env->mmubpregs[reg];
1757
                break;
1758
            case 1: /* Breakpoint Mask */
1759
                ret = env->mmubpregs[reg];
1760
                break;
1761
            case 2: /* Breakpoint Control */
1762
                ret = env->mmubpregs[reg];
1763
                break;
1764
            case 3: /* Breakpoint Status */
1765
                ret = env->mmubpregs[reg];
1766
                env->mmubpregs[reg] = 0ULL;
1767
                break;
1768
            }
1769
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
1770
                        ret);
1771
        }
1772
        break;
1773
    case 8: /* User code access, XXX */
1774
    default:
1775
        do_unassigned_access(addr, 0, 0, asi, size);
1776
        ret = 0;
1777
        break;
1778
    }
1779
    if (sign) {
1780
        switch(size) {
1781
        case 1:
1782
            ret = (int8_t) ret;
1783
            break;
1784
        case 2:
1785
            ret = (int16_t) ret;
1786
            break;
1787
        case 4:
1788
            ret = (int32_t) ret;
1789
            break;
1790
        default:
1791
            break;
1792
        }
1793
    }
1794
#ifdef DEBUG_ASI
1795
    dump_asi("read ", last_addr, asi, size, ret);
1796
#endif
1797
    return ret;
1798
}
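/*
 * Store to alternate space (sparc32 system emulation).  The switch in
 * helper_st_asi() below mirrors the load side: ASI 2 hits the
 * SuperSPARC MXCC registers, ASI 3 triggers MMU flushes, ASI 4 writes
 * the MMU registers, ASIs 0xa/0xb are user/supervisor data, 0x17/0x1f
 * are the 32-byte block copy/fill ASIs, and 0x20-0x2f pass through to
 * physical memory.  Anything unhandled ends up in
 * do_unassigned_access().
 */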
1799

    
1800
void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1801
{
1802
    helper_check_align(addr, size - 1);
1803
    switch(asi) {
1804
    case 2: /* SuperSparc MXCC registers */
1805
        switch (addr) {
1806
        case 0x01c00000: /* MXCC stream data register 0 */
1807
            if (size == 8)
1808
                env->mxccdata[0] = val;
1809
            else
1810
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1811
                             size);
1812
            break;
1813
        case 0x01c00008: /* MXCC stream data register 1 */
1814
            if (size == 8)
1815
                env->mxccdata[1] = val;
1816
            else
1817
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1818
                             size);
1819
            break;
1820
        case 0x01c00010: /* MXCC stream data register 2 */
1821
            if (size == 8)
1822
                env->mxccdata[2] = val;
1823
            else
1824
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1825
                             size);
1826
            break;
1827
        case 0x01c00018: /* MXCC stream data register 3 */
1828
            if (size == 8)
1829
                env->mxccdata[3] = val;
1830
            else
1831
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1832
                             size);
1833
            break;
1834
        case 0x01c00100: /* MXCC stream source */
1835
            if (size == 8)
1836
                env->mxccregs[0] = val;
1837
            else
1838
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1839
                             size);
1840
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1841
                                        0);
1842
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1843
                                        8);
1844
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1845
                                        16);
1846
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1847
                                        24);
1848
            break;
1849
        case 0x01c00200: /* MXCC stream destination */
1850
            if (size == 8)
1851
                env->mxccregs[1] = val;
1852
            else
1853
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1854
                             size);
1855
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  0,
1856
                     env->mxccdata[0]);
1857
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  8,
1858
                     env->mxccdata[1]);
1859
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1860
                     env->mxccdata[2]);
1861
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1862
                     env->mxccdata[3]);
1863
            break;
1864
        case 0x01c00a00: /* MXCC control register */
1865
            if (size == 8)
1866
                env->mxccregs[3] = val;
1867
            else
1868
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1869
                             size);
1870
            break;
1871
        case 0x01c00a04: /* MXCC control register */
1872
            if (size == 4)
1873
                env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1874
                    | val;
1875
            else
1876
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1877
                             size);
1878
            break;
1879
        case 0x01c00e00: /* MXCC error register  */
1880
            // writing a 1 bit clears the error
1881
            if (size == 8)
1882
                env->mxccregs[6] &= ~val;
1883
            else
1884
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1885
                             size);
1886
            break;
1887
        case 0x01c00f00: /* MBus port address register */
1888
            if (size == 8)
1889
                env->mxccregs[7] = val;
1890
            else
1891
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1892
                             size);
1893
            break;
1894
        default:
1895
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1896
                         size);
1897
            break;
1898
        }
1899
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
1900
                     asi, size, addr, val);
1901
#ifdef DEBUG_MXCC
1902
        dump_mxcc(env);
1903
#endif
1904
        break;
1905
    case 3: /* MMU flush */
1906
        {
1907
            int mmulev;
1908

    
1909
            mmulev = (addr >> 8) & 15;
1910
            DPRINTF_MMU("mmu flush level %d\n", mmulev);
1911
            switch (mmulev) {
1912
            case 0: // flush page
1913
                tlb_flush_page(env, addr & 0xfffff000);
1914
                break;
1915
            case 1: // flush segment (256k)
1916
            case 2: // flush region (16M)
1917
            case 3: // flush context (4G)
1918
            case 4: // flush entire
1919
                tlb_flush(env, 1);
1920
                break;
1921
            default:
1922
                break;
1923
            }
1924
#ifdef DEBUG_MMU
1925
            dump_mmu(env);
1926
#endif
1927
        }
1928
        break;
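    /*
     * Only a level-0 flush is page-granular here; the segment, region,
     * context and entire-MMU flushes (levels 1-4) all fall back to a
     * full QEMU TLB flush, which is conservative but correct.
     */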
1929
    case 4: /* write MMU regs */
1930
        {
1931
            int reg = (addr >> 8) & 0x1f;
1932
            uint32_t oldreg;
1933

    
1934
            oldreg = env->mmuregs[reg];
1935
            switch(reg) {
1936
            case 0: // Control Register
1937
                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1938
                                    (val & 0x00ffffff);
1939
                // Mappings generated during no-fault mode or MMU
1940
                // disabled mode are invalid in normal mode
1941
                if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
1942
                    (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
1943
                    tlb_flush(env, 1);
1944
                break;
1945
            case 1: // Context Table Pointer Register
1946
                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
1947
                break;
1948
            case 2: // Context Register
1949
                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
1950
                if (oldreg != env->mmuregs[reg]) {
1951
                    /* we flush when the MMU context changes because
1952
                       QEMU has no MMU context support */
1953
                    tlb_flush(env, 1);
1954
                }
1955
                break;
1956
            case 3: // Synchronous Fault Status Register with Clear
1957
            case 4: // Synchronous Fault Address Register
1958
                break;
1959
            case 0x10: // TLB Replacement Control Register
1960
                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
1961
                break;
1962
            case 0x13: // Synchronous Fault Status Register with Read and Clear
1963
                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
1964
                break;
1965
            case 0x14: // Synchronous Fault Address Register
1966
                env->mmuregs[4] = val;
1967
                break;
1968
            default:
1969
                env->mmuregs[reg] = val;
1970
                break;
1971
            }
1972
            if (oldreg != env->mmuregs[reg]) {
1973
                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1974
                            reg, oldreg, env->mmuregs[reg]);
1975
            }
1976
#ifdef DEBUG_MMU
1977
            dump_mmu(env);
1978
#endif
1979
        }
1980
        break;
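    /*
     * Via ASI 4 the fault status/address registers (reg 3 and 4) are
     * effectively read-only; stores only reach them through the 0x13
     * and 0x14 aliases handled above.  A control register write forces
     * a TLB flush whenever the enable, no-fault or boot-mode bits
     * change, since stale mappings would otherwise survive the mode
     * switch.
     */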
1981
    case 5: // Turbosparc ITLB Diagnostic
1982
    case 6: // Turbosparc DTLB Diagnostic
1983
    case 7: // Turbosparc IOTLB Diagnostic
1984
        break;
1985
    case 0xa: /* User data access */
1986
        switch(size) {
1987
        case 1:
1988
            stb_user(addr, val);
1989
            break;
1990
        case 2:
1991
            stw_user(addr, val);
1992
            break;
1993
        default:
1994
        case 4:
1995
            stl_user(addr, val);
1996
            break;
1997
        case 8:
1998
            stq_user(addr, val);
1999
            break;
2000
        }
2001
        break;
2002
    case 0xb: /* Supervisor data access */
2003
        switch(size) {
2004
        case 1:
2005
            stb_kernel(addr, val);
2006
            break;
2007
        case 2:
2008
            stw_kernel(addr, val);
2009
            break;
2010
        default:
2011
        case 4:
2012
            stl_kernel(addr, val);
2013
            break;
2014
        case 8:
2015
            stq_kernel(addr, val);
2016
            break;
2017
        }
2018
        break;
2019
    case 0xc: /* I-cache tag */
2020
    case 0xd: /* I-cache data */
2021
    case 0xe: /* D-cache tag */
2022
    case 0xf: /* D-cache data */
2023
    case 0x10: /* I/D-cache flush page */
2024
    case 0x11: /* I/D-cache flush segment */
2025
    case 0x12: /* I/D-cache flush region */
2026
    case 0x13: /* I/D-cache flush context */
2027
    case 0x14: /* I/D-cache flush user */
2028
        break;
2029
    case 0x17: /* Block copy, sta access */
2030
        {
2031
            // val = src
2032
            // addr = dst
2033
            // copy 32 bytes
2034
            unsigned int i;
2035
            uint32_t src = val & ~3, dst = addr & ~3, temp;
2036

    
2037
            for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
2038
                temp = ldl_kernel(src);
2039
                stl_kernel(dst, temp);
2040
            }
2041
        }
2042
        break;
2043
    case 0x1f: /* Block fill, stda access */
2044
        {
2045
            // addr = dst
2046
            // fill 32 bytes with val
2047
            unsigned int i;
2048
            uint32_t dst = addr & ~7;
2049

    
2050
            for (i = 0; i < 32; i += 8, dst += 8)
2051
                stq_kernel(dst, val);
2052
        }
2053
        break;
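    /*
     * ASIs 0x17 and 0x1f above implement the 32-byte block copy and
     * block fill using ordinary supervisor data accesses: the copy
     * moves eight 32-bit words from the source (passed in val) to the
     * destination address, and the fill writes the 64-bit stda value
     * four times, 8 bytes apart, starting at the doubleword-aligned
     * destination.
     */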
2054
    case 0x20: /* MMU passthrough */
2055
        {
2056
            switch(size) {
2057
            case 1:
2058
                stb_phys(addr, val);
2059
                break;
2060
            case 2:
2061
                stw_phys(addr, val);
2062
                break;
2063
            case 4:
2064
            default:
2065
                stl_phys(addr, val);
2066
                break;
2067
            case 8:
2068
                stq_phys(addr, val);
2069
                break;
2070
            }
2071
        }
2072
        break;
2073
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
2074
        {
2075
            switch(size) {
2076
            case 1:
2077
                stb_phys((target_phys_addr_t)addr
2078
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2079
                break;
2080
            case 2:
2081
                stw_phys((target_phys_addr_t)addr
2082
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2083
                break;
2084
            case 4:
2085
            default:
2086
                stl_phys((target_phys_addr_t)addr
2087
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2088
                break;
2089
            case 8:
2090
                stq_phys((target_phys_addr_t)addr
2091
                         | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2092
                break;
2093
            }
2094
        }
2095
        break;
2096
    case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
2097
    case 0x31: // store buffer data, Ross RT620 I-cache flush or
2098
               // Turbosparc snoop RAM
2099
    case 0x32: // store buffer control or Turbosparc page table
2100
               // descriptor diagnostic
2101
    case 0x36: /* I-cache flash clear */
2102
    case 0x37: /* D-cache flash clear */
2103
    case 0x4c: /* breakpoint action */
2104
        break;
2105
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
2106
        {
2107
            int reg = (addr >> 8) & 3;
2108

    
2109
            switch(reg) {
2110
            case 0: /* Breakpoint Value (Addr) */
2111
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
2112
                break;
2113
            case 1: /* Breakpoint Mask */
2114
                env->mmubpregs[reg] = (val & 0xfffffffffULL);
2115
                break;
2116
            case 2: /* Breakpoint Control */
2117
                env->mmubpregs[reg] = (val & 0x7fULL);
2118
                break;
2119
            case 3: /* Breakpoint Status */
2120
                env->mmubpregs[reg] = (val & 0xfULL);
2121
                break;
2122
            }
2123
            DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
2124
                        env->mmuregs[reg]);
2125
        }
2126
        break;
2127
    case 8: /* User code access, XXX */
2128
    case 9: /* Supervisor code access, XXX */
2129
    default:
2130
        do_unassigned_access(addr, 1, 0, asi, size);
2131
        break;
2132
    }
2133
#ifdef DEBUG_ASI
2134
    dump_asi("write", addr, asi, size, val);
2135
#endif
2136
}
2137

    
2138
#endif /* CONFIG_USER_ONLY */
2139
#else /* TARGET_SPARC64 */
2140

    
2141
#ifdef CONFIG_USER_ONLY
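/*
 * User-mode (linux-user) version of the V9 ASI helpers: only the
 * unprivileged ASIs 0x80 and above are accepted, the no-fault variants
 * (0x82/0x83/0x8a/0x8b) silently return 0 when the page is not
 * readable instead of faulting, and the little-endian variants
 * byte-swap the value after the access.
 */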
2142
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
2143
{
2144
    uint64_t ret = 0;
2145
#if defined(DEBUG_ASI)
2146
    target_ulong last_addr = addr;
2147
#endif
2148

    
2149
    if (asi < 0x80)
2150
        raise_exception(TT_PRIV_ACT);
2151

    
2152
    helper_check_align(addr, size - 1);
2153
    addr = address_mask(env, addr);
2154

    
2155
    switch (asi) {
2156
    case 0x82: // Primary no-fault
2157
    case 0x8a: // Primary no-fault LE
2158
        if (page_check_range(addr, size, PAGE_READ) == -1) {
2159
#ifdef DEBUG_ASI
2160
            dump_asi("read ", last_addr, asi, size, ret);
2161
#endif
2162
            return 0;
2163
        }
2164
        // Fall through
2165
    case 0x80: // Primary
2166
    case 0x88: // Primary LE
2167
        {
2168
            switch(size) {
2169
            case 1:
2170
                ret = ldub_raw(addr);
2171
                break;
2172
            case 2:
2173
                ret = lduw_raw(addr);
2174
                break;
2175
            case 4:
2176
                ret = ldl_raw(addr);
2177
                break;
2178
            default:
2179
            case 8:
2180
                ret = ldq_raw(addr);
2181
                break;
2182
            }
2183
        }
2184
        break;
2185
    case 0x83: // Secondary no-fault
2186
    case 0x8b: // Secondary no-fault LE
2187
        if (page_check_range(addr, size, PAGE_READ) == -1) {
2188
#ifdef DEBUG_ASI
2189
            dump_asi("read ", last_addr, asi, size, ret);
2190
#endif
2191
            return 0;
2192
        }
2193
        // Fall through
2194
    case 0x81: // Secondary
2195
    case 0x89: // Secondary LE
2196
        // XXX
2197
        break;
2198
    default:
2199
        break;
2200
    }
2201

    
2202
    /* Convert from little endian */
2203
    switch (asi) {
2204
    case 0x88: // Primary LE
2205
    case 0x89: // Secondary LE
2206
    case 0x8a: // Primary no-fault LE
2207
    case 0x8b: // Secondary no-fault LE
2208
        switch(size) {
2209
        case 2:
2210
            ret = bswap16(ret);
2211
            break;
2212
        case 4:
2213
            ret = bswap32(ret);
2214
            break;
2215
        case 8:
2216
            ret = bswap64(ret);
2217
            break;
2218
        default:
2219
            break;
2220
        }
2221
    default:
2222
        break;
2223
    }
2224

    
2225
    /* Convert to signed number */
2226
    if (sign) {
2227
        switch(size) {
2228
        case 1:
2229
            ret = (int8_t) ret;
2230
            break;
2231
        case 2:
2232
            ret = (int16_t) ret;
2233
            break;
2234
        case 4:
2235
            ret = (int32_t) ret;
2236
            break;
2237
        default:
2238
            break;
2239
        }
2240
    }
2241
#ifdef DEBUG_ASI
2242
    dump_asi("read ", last_addr, asi, size, ret);
2243
#endif
2244
    return ret;
2245
}
2246

    
2247
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
2248
{
2249
#ifdef DEBUG_ASI
2250
    dump_asi("write", addr, asi, size, val);
2251
#endif
2252
    if (asi < 0x80)
2253
        raise_exception(TT_PRIV_ACT);
2254

    
2255
    helper_check_align(addr, size - 1);
2256
    addr = address_mask(env, addr);
2257

    
2258
    /* Convert to little endian */
2259
    switch (asi) {
2260
    case 0x88: // Primary LE
2261
    case 0x89: // Secondary LE
2262
        switch(size) {
2263
        case 2:
2264
            val = bswap16(val);
2265
            break;
2266
        case 4:
2267
            val = bswap32(val);
2268
            break;
2269
        case 8:
2270
            val = bswap64(val);
2271
            break;
2272
        default:
2273
            break;
2274
        }
2275
    default:
2276
        break;
2277
    }
2278

    
2279
    switch(asi) {
2280
    case 0x80: // Primary
2281
    case 0x88: // Primary LE
2282
        {
2283
            switch(size) {
2284
            case 1:
2285
                stb_raw(addr, val);
2286
                break;
2287
            case 2:
2288
                stw_raw(addr, val);
2289
                break;
2290
            case 4:
2291
                stl_raw(addr, val);
2292
                break;
2293
            case 8:
2294
            default:
2295
                stq_raw(addr, val);
2296
                break;
2297
            }
2298
        }
2299
        break;
2300
    case 0x81: // Secondary
2301
    case 0x89: // Secondary LE
2302
        // XXX
2303
        return;
2304

    
2305
    case 0x82: // Primary no-fault, RO
2306
    case 0x83: // Secondary no-fault, RO
2307
    case 0x8a: // Primary no-fault LE, RO
2308
    case 0x8b: // Secondary no-fault LE, RO
2309
    default:
2310
        do_unassigned_access(addr, 1, 0, 1, size);
2311
        return;
2312
    }
2313
}
2314

    
2315
#else /* CONFIG_USER_ONLY */
2316

    
2317
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
2318
{
2319
    uint64_t ret = 0;
2320
#if defined(DEBUG_ASI)
2321
    target_ulong last_addr = addr;
2322
#endif
2323

    
2324
    asi &= 0xff;
2325

    
2326
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2327
        || (cpu_has_hypervisor(env)
2328
            && asi >= 0x30 && asi < 0x80
2329
            && !(env->hpstate & HS_PRIV)))
2330
        raise_exception(TT_PRIV_ACT);
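    /*
     * ASIs below 0x80 are privileged, and when the CPU model has a
     * hypervisor the 0x30-0x7f range is additionally reserved to
     * hyperprivileged mode, so a supervisor access to it also raises
     * TT_PRIV_ACT.
     */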
2331

    
2332
    helper_check_align(addr, size - 1);
2333
    switch (asi) {
2334
    case 0x82: // Primary no-fault
2335
    case 0x8a: // Primary no-fault LE
2336
    case 0x83: // Secondary no-fault
2337
    case 0x8b: // Secondary no-fault LE
2338
        {
2339
            /* secondary space access has lowest asi bit equal to 1 */
2340
            int access_mmu_idx = ( asi & 1 ) ? MMU_KERNEL_IDX
2341
                                             : MMU_KERNEL_SECONDARY_IDX;
2342

    
2343
            if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
2344
#ifdef DEBUG_ASI
2345
                dump_asi("read ", last_addr, asi, size, ret);
2346
#endif
2347
                return 0;
2348
            }
2349
        }
2350
        // Fall through
2351
    case 0x10: // As if user primary
2352
    case 0x11: // As if user secondary
2353
    case 0x18: // As if user primary LE
2354
    case 0x19: // As if user secondary LE
2355
    case 0x80: // Primary
2356
    case 0x81: // Secondary
2357
    case 0x88: // Primary LE
2358
    case 0x89: // Secondary LE
2359
    case 0xe2: // UA2007 Primary block init
2360
    case 0xe3: // UA2007 Secondary block init
2361
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
2362
            if (cpu_hypervisor_mode(env)) {
2363
                switch(size) {
2364
                case 1:
2365
                    ret = ldub_hypv(addr);
2366
                    break;
2367
                case 2:
2368
                    ret = lduw_hypv(addr);
2369
                    break;
2370
                case 4:
2371
                    ret = ldl_hypv(addr);
2372
                    break;
2373
                default:
2374
                case 8:
2375
                    ret = ldq_hypv(addr);
2376
                    break;
2377
                }
2378
            } else {
2379
                /* secondary space access has lowest asi bit equal to 1 */
2380
                if (asi & 1) {
2381
                    switch(size) {
2382
                    case 1:
2383
                        ret = ldub_kernel_secondary(addr);
2384
                        break;
2385
                    case 2:
2386
                        ret = lduw_kernel_secondary(addr);
2387
                        break;
2388
                    case 4:
2389
                        ret = ldl_kernel_secondary(addr);
2390
                        break;
2391
                    default:
2392
                    case 8:
2393
                        ret = ldq_kernel_secondary(addr);
2394
                        break;
2395
                    }
2396
                } else {
2397
                    switch(size) {
2398
                    case 1:
2399
                        ret = ldub_kernel(addr);
2400
                        break;
2401
                    case 2:
2402
                        ret = lduw_kernel(addr);
2403
                        break;
2404
                    case 4:
2405
                        ret = ldl_kernel(addr);
2406
                        break;
2407
                    default:
2408
                    case 8:
2409
                        ret = ldq_kernel(addr);
2410
                        break;
2411
                    }
2412
                }
2413
            }
2414
        } else {
2415
            /* secondary space access has lowest asi bit equal to 1 */
2416
            if (asi & 1) {
2417
                switch(size) {
2418
                case 1:
2419
                    ret = ldub_user_secondary(addr);
2420
                    break;
2421
                case 2:
2422
                    ret = lduw_user_secondary(addr);
2423
                    break;
2424
                case 4:
2425
                    ret = ldl_user_secondary(addr);
2426
                    break;
2427
                default:
2428
                case 8:
2429
                    ret = ldq_user_secondary(addr);
2430
                    break;
2431
                }
2432
            } else {
2433
                switch(size) {
2434
                case 1:
2435
                    ret = ldub_user(addr);
2436
                    break;
2437
                case 2:
2438
                    ret = lduw_user(addr);
2439
                    break;
2440
                case 4:
2441
                    ret = ldl_user(addr);
2442
                    break;
2443
                default:
2444
                case 8:
2445
                    ret = ldq_user(addr);
2446
                    break;
2447
                }
2448
            }
2449
        }
2450
        break;
2451
    case 0x14: // Bypass
2452
    case 0x15: // Bypass, non-cacheable
2453
    case 0x1c: // Bypass LE
2454
    case 0x1d: // Bypass, non-cacheable LE
2455
        {
2456
            switch(size) {
2457
            case 1:
2458
                ret = ldub_phys(addr);
2459
                break;
2460
            case 2:
2461
                ret = lduw_phys(addr);
2462
                break;
2463
            case 4:
2464
                ret = ldl_phys(addr);
2465
                break;
2466
            default:
2467
            case 8:
2468
                ret = ldq_phys(addr);
2469
                break;
2470
            }
2471
            break;
2472
        }
2473
    case 0x24: // Nucleus quad LDD 128 bit atomic
2474
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2475
        //  Only ldda allowed
2476
        raise_exception(TT_ILL_INSN);
2477
        return 0;
2478
    case 0x04: // Nucleus
2479
    case 0x0c: // Nucleus Little Endian (LE)
2480
    {
2481
        switch(size) {
2482
        case 1:
2483
            ret = ldub_nucleus(addr);
2484
            break;
2485
        case 2:
2486
            ret = lduw_nucleus(addr);
2487
            break;
2488
        case 4:
2489
            ret = ldl_nucleus(addr);
2490
            break;
2491
        default:
2492
        case 8:
2493
            ret = ldq_nucleus(addr);
2494
            break;
2495
        }
2496
        break;
2497
    }
2498
    case 0x4a: // UPA config
2499
        // XXX
2500
        break;
2501
    case 0x45: // LSU
2502
        ret = env->lsu;
2503
        break;
2504
    case 0x50: // I-MMU regs
2505
        {
2506
            int reg = (addr >> 3) & 0xf;
2507

    
2508
            if (reg == 0) {
2509
                // I-TSB Tag Target register
2510
                ret = ultrasparc_tag_target(env->immu.tag_access);
2511
            } else {
2512
                ret = env->immuregs[reg];
2513
            }
2514

    
2515
            break;
2516
        }
2517
    case 0x51: // I-MMU 8k TSB pointer
2518
        {
2519
            // env->immuregs[5] holds I-MMU TSB register value
2520
            // env->immuregs[6] holds I-MMU Tag Access register value
2521
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
2522
                                         8*1024);
2523
            break;
2524
        }
2525
    case 0x52: // I-MMU 64k TSB pointer
2526
        {
2527
            // env->immuregs[5] holds I-MMU TSB register value
2528
            // env->immuregs[6] holds I-MMU Tag Access register value
2529
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
2530
                                         64*1024);
2531
            break;
2532
        }
2533
    case 0x55: // I-MMU data access
2534
        {
2535
            int reg = (addr >> 3) & 0x3f;
2536

    
2537
            ret = env->itlb[reg].tte;
2538
            break;
2539
        }
2540
    case 0x56: // I-MMU tag read
2541
        {
2542
            int reg = (addr >> 3) & 0x3f;
2543

    
2544
            ret = env->itlb[reg].tag;
2545
            break;
2546
        }
2547
    case 0x58: // D-MMU regs
2548
        {
2549
            int reg = (addr >> 3) & 0xf;
2550

    
2551
            if (reg == 0) {
2552
                // D-TSB Tag Target register
2553
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
2554
            } else {
2555
                ret = env->dmmuregs[reg];
2556
            }
2557
            break;
2558
        }
2559
    case 0x59: // D-MMU 8k TSB pointer
2560
        {
2561
            // env->dmmuregs[5] holds D-MMU TSB register value
2562
            // env->dmmuregs[6] holds D-MMU Tag Access register value
2563
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
2564
                                         8*1024);
2565
            break;
2566
        }
2567
    case 0x5a: // D-MMU 64k TSB pointer
2568
        {
2569
            // env->dmmuregs[5] holds D-MMU TSB register value
2570
            // env->dmmuregs[6] holds D-MMU Tag Access register value
2571
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
2572
                                         64*1024);
2573
            break;
2574
        }
2575
    case 0x5d: // D-MMU data access
2576
        {
2577
            int reg = (addr >> 3) & 0x3f;
2578

    
2579
            ret = env->dtlb[reg].tte;
2580
            break;
2581
        }
2582
    case 0x5e: // D-MMU tag read
2583
        {
2584
            int reg = (addr >> 3) & 0x3f;
2585

    
2586
            ret = env->dtlb[reg].tag;
2587
            break;
2588
        }
2589
    case 0x46: // D-cache data
2590
    case 0x47: // D-cache tag access
2591
    case 0x4b: // E-cache error enable
2592
    case 0x4c: // E-cache asynchronous fault status
2593
    case 0x4d: // E-cache asynchronous fault address
2594
    case 0x4e: // E-cache tag data
2595
    case 0x66: // I-cache instruction access
2596
    case 0x67: // I-cache tag access
2597
    case 0x6e: // I-cache predecode
2598
    case 0x6f: // I-cache LRU etc.
2599
    case 0x76: // E-cache tag
2600
    case 0x7e: // E-cache tag
2601
        break;
2602
    case 0x5b: // D-MMU data pointer
2603
    case 0x48: // Interrupt dispatch, RO
2604
    case 0x49: // Interrupt data receive
2605
    case 0x7f: // Incoming interrupt vector, RO
2606
        // XXX
2607
        break;
2608
    case 0x54: // I-MMU data in, WO
2609
    case 0x57: // I-MMU demap, WO
2610
    case 0x5c: // D-MMU data in, WO
2611
    case 0x5f: // D-MMU demap, WO
2612
    case 0x77: // Interrupt vector, WO
2613
    default:
2614
        do_unassigned_access(addr, 0, 0, 1, size);
2615
        ret = 0;
2616
        break;
2617
    }
2618

    
2619
    /* Convert from little endian */
2620
    switch (asi) {
2621
    case 0x0c: // Nucleus Little Endian (LE)
2622
    case 0x18: // As if user primary LE
2623
    case 0x19: // As if user secondary LE
2624
    case 0x1c: // Bypass LE
2625
    case 0x1d: // Bypass, non-cacheable LE
2626
    case 0x88: // Primary LE
2627
    case 0x89: // Secondary LE
2628
    case 0x8a: // Primary no-fault LE
2629
    case 0x8b: // Secondary no-fault LE
2630
        switch(size) {
2631
        case 2:
2632
            ret = bswap16(ret);
2633
            break;
2634
        case 4:
2635
            ret = bswap32(ret);
2636
            break;
2637
        case 8:
2638
            ret = bswap64(ret);
2639
            break;
2640
        default:
2641
            break;
2642
        }
2643
    default:
2644
        break;
2645
    }
2646

    
2647
    /* Convert to signed number */
2648
    if (sign) {
2649
        switch(size) {
2650
        case 1:
2651
            ret = (int8_t) ret;
2652
            break;
2653
        case 2:
2654
            ret = (int16_t) ret;
2655
            break;
2656
        case 4:
2657
            ret = (int32_t) ret;
2658
            break;
2659
        default:
2660
            break;
2661
        }
2662
    }
2663
#ifdef DEBUG_ASI
2664
    dump_asi("read ", last_addr, asi, size, ret);
2665
#endif
2666
    return ret;
2667
}
2668

    
2669
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
2670
{
2671
#ifdef DEBUG_ASI
2672
    dump_asi("write", addr, asi, size, val);
2673
#endif
2674

    
2675
    asi &= 0xff;
2676

    
2677
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2678
        || (cpu_has_hypervisor(env)
2679
            && asi >= 0x30 && asi < 0x80
2680
            && !(env->hpstate & HS_PRIV)))
2681
        raise_exception(TT_PRIV_ACT);
2682

    
2683
    helper_check_align(addr, size - 1);
2684
    /* Convert to little endian */
2685
    switch (asi) {
2686
    case 0x0c: // Nucleus Little Endian (LE)
2687
    case 0x18: // As if user primary LE
2688
    case 0x19: // As if user secondary LE
2689
    case 0x1c: // Bypass LE
2690
    case 0x1d: // Bypass, non-cacheable LE
2691
    case 0x88: // Primary LE
2692
    case 0x89: // Secondary LE
2693
        switch(size) {
2694
        case 2:
2695
            val = bswap16(val);
2696
            break;
2697
        case 4:
2698
            val = bswap32(val);
2699
            break;
2700
        case 8:
2701
            val = bswap64(val);
2702
            break;
2703
        default:
2704
            break;
2705
        }
2706
    default:
2707
        break;
2708
    }
2709

    
2710
    switch(asi) {
2711
    case 0x10: // As if user primary
2712
    case 0x11: // As if user secondary
2713
    case 0x18: // As if user primary LE
2714
    case 0x19: // As if user secondary LE
2715
    case 0x80: // Primary
2716
    case 0x81: // Secondary
2717
    case 0x88: // Primary LE
2718
    case 0x89: // Secondary LE
2719
    case 0xe2: // UA2007 Primary block init
2720
    case 0xe3: // UA2007 Secondary block init
2721
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
2722
            if (cpu_hypervisor_mode(env)) {
2723
                switch(size) {
2724
                case 1:
2725
                    stb_hypv(addr, val);
2726
                    break;
2727
                case 2:
2728
                    stw_hypv(addr, val);
2729
                    break;
2730
                case 4:
2731
                    stl_hypv(addr, val);
2732
                    break;
2733
                case 8:
2734
                default:
2735
                    stq_hypv(addr, val);
2736
                    break;
2737
                }
2738
            } else {
2739
                /* secondary space access has lowest asi bit equal to 1 */
2740
                if (asi & 1) {
2741
                    switch(size) {
2742
                    case 1:
2743
                        stb_kernel_secondary(addr, val);
2744
                        break;
2745
                    case 2:
2746
                        stw_kernel_secondary(addr, val);
2747
                        break;
2748
                    case 4:
2749
                        stl_kernel_secondary(addr, val);
2750
                        break;
2751
                    case 8:
2752
                    default:
2753
                        stq_kernel_secondary(addr, val);
2754
                        break;
2755
                    }
2756
                } else {
2757
                    switch(size) {
2758
                    case 1:
2759
                        stb_kernel(addr, val);
2760
                        break;
2761
                    case 2:
2762
                        stw_kernel(addr, val);
2763
                        break;
2764
                    case 4:
2765
                        stl_kernel(addr, val);
2766
                        break;
2767
                    case 8:
2768
                    default:
2769
                        stq_kernel(addr, val);
2770
                        break;
2771
                    }
2772
                }
2773
            }
2774
        } else {
2775
            /* secondary space access has lowest asi bit equal to 1 */
2776
            if (asi & 1) {
2777
                switch(size) {
2778
                case 1:
2779
                    stb_user_secondary(addr, val);
2780
                    break;
2781
                case 2:
2782
                    stw_user_secondary(addr, val);
2783
                    break;
2784
                case 4:
2785
                    stl_user_secondary(addr, val);
2786
                    break;
2787
                case 8:
2788
                default:
2789
                    stq_user_secondary(addr, val);
2790
                    break;
2791
                }
2792
            } else {
2793
                switch(size) {
2794
                case 1:
2795
                    stb_user(addr, val);
2796
                    break;
2797
                case 2:
2798
                    stw_user(addr, val);
2799
                    break;
2800
                case 4:
2801
                    stl_user(addr, val);
2802
                    break;
2803
                case 8:
2804
                default:
2805
                    stq_user(addr, val);
2806
                    break;
2807
                }
2808
            }
2809
        }
2810
        break;
2811
    case 0x14: // Bypass
2812
    case 0x15: // Bypass, non-cacheable
2813
    case 0x1c: // Bypass LE
2814
    case 0x1d: // Bypass, non-cacheable LE
2815
        {
2816
            switch(size) {
2817
            case 1:
2818
                stb_phys(addr, val);
2819
                break;
2820
            case 2:
2821
                stw_phys(addr, val);
2822
                break;
2823
            case 4:
2824
                stl_phys(addr, val);
2825
                break;
2826
            case 8:
2827
            default:
2828
                stq_phys(addr, val);
2829
                break;
2830
            }
2831
        }
2832
        return;
2833
    case 0x24: // Nucleus quad LDD 128 bit atomic
2834
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2835
        //  Only ldda allowed
2836
        raise_exception(TT_ILL_INSN);
2837
        return;
2838
    case 0x04: // Nucleus
2839
    case 0x0c: // Nucleus Little Endian (LE)
2840
    {
2841
        switch(size) {
2842
        case 1:
2843
            stb_nucleus(addr, val);
2844
            break;
2845
        case 2:
2846
            stw_nucleus(addr, val);
2847
            break;
2848
        case 4:
2849
            stl_nucleus(addr, val);
2850
            break;
2851
        default:
2852
        case 8:
2853
            stq_nucleus(addr, val);
2854
            break;
2855
        }
2856
        break;
2857
    }
2858

    
2859
    case 0x4a: // UPA config
2860
        // XXX
2861
        return;
2862
    case 0x45: // LSU
2863
        {
2864
            uint64_t oldreg;
2865

    
2866
            oldreg = env->lsu;
2867
            env->lsu = val & (DMMU_E | IMMU_E);
2868
            // Mappings generated during D/I MMU disabled mode are
2869
            // invalid in normal mode
2870
            if (oldreg != env->lsu) {
2871
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
2872
                            oldreg, env->lsu);
2873
#ifdef DEBUG_MMU
2874
                dump_mmu(env);
2875
#endif
2876
                tlb_flush(env, 1);
2877
            }
2878
            return;
2879
        }
2880
    case 0x50: // I-MMU regs
2881
        {
2882
            int reg = (addr >> 3) & 0xf;
2883
            uint64_t oldreg;
2884

    
2885
            oldreg = env->immuregs[reg];
2886
            switch(reg) {
2887
            case 0: // RO
2888
                return;
2889
            case 1: // Not in I-MMU
2890
            case 2:
2891
                return;
2892
            case 3: // SFSR
2893
                if ((val & 1) == 0)
2894
                    val = 0; // Clear SFSR
2895
                env->immu.sfsr = val;
2896
                break;
2897
            case 4: // RO
2898
                return;
2899
            case 5: // TSB access
2900
                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
2901
                            PRIx64 "\n", env->immu.tsb, val);
2902
                env->immu.tsb = val;
2903
                break;
2904
            case 6: // Tag access
2905
                env->immu.tag_access = val;
2906
                break;
2907
            case 7:
2908
            case 8:
2909
                return;
2910
            default:
2911
                break;
2912
            }
2913

    
2914
            if (oldreg != env->immuregs[reg]) {
2915
                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
2916
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
2917
            }
2918
#ifdef DEBUG_MMU
2919
            dump_mmu(env);
2920
#endif
2921
            return;
2922
        }
2923
    case 0x54: // I-MMU data in
2924
        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
2925
        return;
2926
    case 0x55: // I-MMU data access
2927
        {
2928
            // TODO: auto demap
2929

    
2930
            unsigned int i = (addr >> 3) & 0x3f;
2931

    
2932
            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
2933

    
2934
#ifdef DEBUG_MMU
2935
            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
2936
            dump_mmu(env);
2937
#endif
2938
            return;
2939
        }
2940
    case 0x57: // I-MMU demap
2941
        demap_tlb(env->itlb, addr, "immu", env);
2942
        return;
2943
    case 0x58: // D-MMU regs
2944
        {
2945
            int reg = (addr >> 3) & 0xf;
2946
            uint64_t oldreg;
2947

    
2948
            oldreg = env->dmmuregs[reg];
2949
            switch(reg) {
2950
            case 0: // RO
2951
            case 4:
2952
                return;
2953
            case 3: // SFSR
2954
                if ((val & 1) == 0) {
2955
                    val = 0; // Clear SFSR, Fault address
2956
                    env->dmmu.sfar = 0;
2957
                }
2958
                env->dmmu.sfsr = val;
2959
                break;
2960
            case 1: // Primary context
2961
                env->dmmu.mmu_primary_context = val;
2962
                /* can be optimized to only flush MMU_USER_IDX
2963
                   and MMU_KERNEL_IDX entries */
2964
                tlb_flush(env, 1);
2965
                break;
2966
            case 2: // Secondary context
2967
                env->dmmu.mmu_secondary_context = val;
2968
                /* can be optimized to only flush MMU_USER_SECONDARY_IDX
2969
                   and MMU_KERNEL_SECONDARY_IDX entries */
2970
                tlb_flush(env, 1);
2971
                break;
2972
            case 5: // TSB access
2973
                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
2974
                            PRIx64 "\n", env->dmmu.tsb, val);
2975
                env->dmmu.tsb = val;
2976
                break;
2977
            case 6: // Tag access
2978
                env->dmmu.tag_access = val;
2979
                break;
2980
            case 7: // Virtual Watchpoint
2981
            case 8: // Physical Watchpoint
2982
            default:
2983
                env->dmmuregs[reg] = val;
2984
                break;
2985
            }
2986

    
2987
            if (oldreg != env->dmmuregs[reg]) {
2988
                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
2989
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
2990
            }
2991
#ifdef DEBUG_MMU
2992
            dump_mmu(env);
2993
#endif
2994
            return;
2995
        }
2996
    case 0x5c: // D-MMU data in
2997
        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
2998
        return;
2999
    case 0x5d: // D-MMU data access
3000
        {
3001
            unsigned int i = (addr >> 3) & 0x3f;
3002

    
3003
            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
3004

    
3005
#ifdef DEBUG_MMU
3006
            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
3007
            dump_mmu(env);
3008
#endif
3009
            return;
3010
        }
3011
    case 0x5f: // D-MMU demap
3012
        demap_tlb(env->dtlb, addr, "dmmu", env);
3013
        return;
3014
    case 0x49: // Interrupt data receive
3015
        // XXX
3016
        return;
3017
    case 0x46: // D-cache data
3018
    case 0x47: // D-cache tag access
3019
    case 0x4b: // E-cache error enable
3020
    case 0x4c: // E-cache asynchronous fault status
3021
    case 0x4d: // E-cache asynchronous fault address
3022
    case 0x4e: // E-cache tag data
3023
    case 0x66: // I-cache instruction access
3024
    case 0x67: // I-cache tag access
3025
    case 0x6e: // I-cache predecode
3026
    case 0x6f: // I-cache LRU etc.
3027
    case 0x76: // E-cache tag
3028
    case 0x7e: // E-cache tag
3029
        return;
3030
    case 0x51: // I-MMU 8k TSB pointer, RO
3031
    case 0x52: // I-MMU 64k TSB pointer, RO
3032
    case 0x56: // I-MMU tag read, RO
3033
    case 0x59: // D-MMU 8k TSB pointer, RO
3034
    case 0x5a: // D-MMU 64k TSB pointer, RO
3035
    case 0x5b: // D-MMU data pointer, RO
3036
    case 0x5e: // D-MMU tag read, RO
3037
    case 0x48: // Interrupt dispatch, RO
3038
    case 0x7f: // Incoming interrupt vector, RO
3039
    case 0x82: // Primary no-fault, RO
3040
    case 0x83: // Secondary no-fault, RO
3041
    case 0x8a: // Primary no-fault LE, RO
3042
    case 0x8b: // Secondary no-fault LE, RO
3043
    default:
3044
        do_unassigned_access(addr, 1, 0, 1, size);
3045
        return;
3046
    }
3047
}
#endif /* CONFIG_USER_ONLY */

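/*
 * 64-bit ldda through an ASI: the quad-LDD ASIs 0x24/0x2c load a
 * 16-byte aligned pair with nucleus accesses (byte-swapped for the LE
 * variant), every other ASI is split into two 32-bit helper_ld_asi()
 * calls.  rd == 0 only writes %g1 because %g0 stays hard-wired to
 * zero; rd < 8 targets the globals, larger rd the current window via
 * regwptr.
 */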
void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    switch (asi) {
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        helper_check_align(addr, 0xf);
        if (rd == 0) {
            env->gregs[1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_nucleus(addr);
            env->gregs[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            env->regwptr[rd] = ldq_nucleus(addr);
            env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
    default:
        helper_check_align(addr, 0x3);
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}

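/*
 * FP loads and stores through an ASI.  The block-transfer ASIs
 * (0xf0/0xf1/0xf8/0xf9, plus the UA2007 block-commit stores on the
 * store side) move 64 bytes as sixteen 32-bit accesses into
 * consecutive %f registers, require rd to be a multiple of 8 and the
 * address to be 64-byte aligned; other ASIs fall through to a single
 * access of the requested size (the 128-bit case is still
 * unimplemented, see the XXX below).
 */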
void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xf0: // Block load primary
    case 0xf1: // Block load secondary
    case 0xf8: // Block load primary LE
    case 0xf9: // Block load secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    val = helper_ld_asi(addr, asi, size, 0);
    switch(size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = val;
        break;
    case 8:
        *((int64_t *)&DT0) = val;
        break;
    case 16:
        // XXX
        break;
    }
}

void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xe0: // UA2007 Block commit store primary (cache flush)
    case 0xe1: // UA2007 Block commit store secondary (cache flush)
    case 0xf0: // Block store primary
    case 0xf1: // Block store secondary
    case 0xf8: // Block store primary LE
    case 0xf9: // Block store secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        val = *((uint32_t *)&env->fpr[rd]);
        break;
    case 8:
        val = *((int64_t *)&DT0);
        break;
    case 16:
        // XXX
        break;
    }
    helper_st_asi(addr, val, asi, size);
}

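/*
 * casa/casxa: compare the value in memory at [addr] (loaded through
 * the given ASI) with val2; if they match, store val1, and in either
 * case return the old memory value so the guest can tell whether the
 * swap happened.  The 32-bit variant masks both operands to 32 bits.
 */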
target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
                            target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    val2 &= 0xffffffffUL;
    ret = helper_ld_asi(addr, asi, 4, 0);
    ret &= 0xffffffffUL;
    if (val2 == ret)
        helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
    return ret;
}

target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
                             target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    ret = helper_ld_asi(addr, asi, 8, 0);
    if (val2 == ret)
        helper_st_asi(addr, val1, asi, 8);
    return ret;
}
#endif /* TARGET_SPARC64 */

#ifndef TARGET_SPARC64
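/*
 * rett: return from trap on sparc32.  Traps are re-enabled (a rett
 * with traps already enabled is illegal), the window pointer is
 * incremented with a WIM check for window underflow, and the
 * supervisor bit is restored from the saved PS field.
 */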
void helper_rett(void)
{
    unsigned int cwp;

    if (env->psret == 1)
        raise_exception(TT_ILL_INSN);

    env->psret = 1;
    cwp = cwp_inc(env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
    env->psrs = env->psrps;
}
#endif

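/*
 * udiv/sdiv divide the 64-bit value formed by Y:rs1 by the 32-bit rs2
 * and record overflow in cc_src2 (consumed by the CC variants of the
 * instruction).  For example, with Y = 1 and rs1 = 0 the dividend is
 * 0x100000000: dividing by 2 gives 0x80000000 with no overflow, while
 * dividing by 1 overflows and the result is clamped to 0xffffffff
 * (unsigned) or to 0x7fffffff/0x80000000 (signed).
 */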
target_ulong helper_udiv(target_ulong a, target_ulong b)
{
    uint64_t x0;
    uint32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = b;

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if (x0 > 0xffffffff) {
        env->cc_src2 = 1;
        return 0xffffffff;
    } else {
        env->cc_src2 = 0;
        return x0;
    }
}

target_ulong helper_sdiv(target_ulong a, target_ulong b)
{
    int64_t x0;
    int32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = b;

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if ((int32_t) x0 != x0) {
        env->cc_src2 = 1;
        return x0 < 0 ? 0x80000000 : 0x7fffffff;
    } else {
        env->cc_src2 = 0;
        return x0;
    }
}

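/*
 * Double- and quad-precision FP memory helpers.  mem_idx selects the
 * MMU index (0 user, 1 kernel, 2 hypervisor on sparc64); in user-only
 * builds the access goes straight to the masked virtual address.  The
 * quad versions emulate the 128-bit access as two 64-bit accesses
 * (upper half first) and, as the XXX notes say, only enforce 8-byte
 * alignment.
 */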
void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        stfq_user(addr, DT0);
        break;
    case 1:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case 2:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        break;
    }
#else
    stfq_raw(address_mask(env, addr), DT0);
#endif
}

void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        DT0 = ldfq_user(addr);
        break;
    case 1:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case 2:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        break;
    }
#else
    DT0 = ldfq_raw(address_mask(env, addr));
#endif
}

void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case 1:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        break;
    }
#else
    u.ll.upper = ldq_raw(address_mask(env, addr));
    u.ll.lower = ldq_raw(address_mask(env, addr + 8));
    QT0 = u.q;
#endif
}

void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case 1:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        break;
    }
#else
    u.q = QT0;
    stq_raw(address_mask(env, addr), u.ll.upper);
    stq_raw(address_mask(env, addr + 8), u.ll.lower);
#endif
}

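/*
 * set_fsr() below re-derives softfloat's rounding mode from the FSR.RD
 * field after every ldfsr/ldxfsr, mapping nearest/zero/+inf/-inf onto
 * the corresponding float_round_* constants.
 */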
static inline void set_fsr(void)
{
    int rnd_mode;

    switch (env->fsr & FSR_RD_MASK) {
    case FSR_RD_NEAREST:
        rnd_mode = float_round_nearest_even;
        break;
    default:
    case FSR_RD_ZERO:
        rnd_mode = float_round_to_zero;
        break;
    case FSR_RD_POS:
        rnd_mode = float_round_up;
        break;
    case FSR_RD_NEG:
        rnd_mode = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_mode, &env->fp_status);
}

void helper_ldfsr(uint32_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
    set_fsr();
}

#ifdef TARGET_SPARC64
void helper_ldxfsr(uint64_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
    set_fsr();
}
#endif

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

#ifndef TARGET_SPARC64
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
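/*
 * sparc32 window handling: save/restore just step CWP down/up and
 * raise a window overflow/underflow trap when the target window's bit
 * is set in WIM, leaving the actual spill/fill to the guest trap
 * handlers.
 */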
void helper_save(void)
3434
{
3435
    uint32_t cwp;
3436

    
3437
    cwp = cwp_dec(env->cwp - 1);
3438
    if (env->wim & (1 << cwp)) {
3439
        raise_exception(TT_WIN_OVF);
3440
    }
3441
    set_cwp(cwp);
3442
}
3443

    
3444
void helper_restore(void)
3445
{
3446
    uint32_t cwp;
3447

    
3448
    cwp = cwp_inc(env->cwp + 1);
3449
    if (env->wim & (1 << cwp)) {
3450
        raise_exception(TT_WIN_UNF);
3451
    }
3452
    set_cwp(cwp);
3453
}
3454

    
3455
void helper_wrpsr(target_ulong new_psr)
3456
{
3457
    if ((new_psr & PSR_CWP) >= env->nwindows) {
3458
        raise_exception(TT_ILL_INSN);
3459
    } else {
3460
        cpu_put_psr(env, new_psr);
3461
    }
3462
}
3463

    
3464
target_ulong helper_rdpsr(void)
3465
{
3466
    return get_psr();
3467
}
3468

    
3469
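/* Illustrative sketch (guarded out): the V8 SAVE/RESTORE helpers above only
   have to test the window invalid mask.  With nwindows = 8 and WIM = 0x01,
   a SAVE from CWP 1 decrements to CWP 0, finds bit 0 set and traps with
   TT_WIN_OVF; that bit test is all helper_save() performs. */
#if 0
static int example_would_overflow(uint32_t wim, uint32_t new_cwp)
{
    return (wim & (1 << new_cwp)) != 0;
}
#endif
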
#else
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cwp_dec(env->cwp - 1);
    if (env->cansave == 0) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            // XXX Clean windows without trap
            raise_exception(TT_CLRWIN);
        } else {
            env->cansave--;
            env->canrestore++;
            set_cwp(cwp);
        }
    }
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cwp_inc(env->cwp + 1);
    if (env->canrestore == 0) {
        raise_exception(TT_FILL | (env->otherwin != 0 ?
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                   ((env->wstate & 0x7) << 2)));
    } else {
        env->cansave++;
        env->canrestore--;
        set_cwp(cwp);
    }
}

void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}

void helper_saved(void)
{
    env->cansave++;
    if (env->otherwin == 0)
        env->canrestore--;
    else
        env->otherwin--;
}

void helper_restored(void)
{
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    if (env->otherwin == 0)
        env->cansave--;
    else
        env->otherwin--;
}

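/* Illustrative sketch (guarded out): the spill/fill trap numbers built above
   follow the V9 scheme TT_SPILL/TT_FILL + 4 * WSTATE field, with TT_WOTHER
   selecting the "other" variant when OTHERWIN is non-zero.  WSTATE.NORMAL
   sits in bits 2:0 and WSTATE.OTHER in bits 5:3, which is why the two
   shifts differ. */
#if 0
static int example_spill_tt(uint32_t wstate, uint32_t otherwin)
{
    if (otherwin != 0) {
        return TT_SPILL | TT_WOTHER | ((wstate & 0x38) >> 1); /* 4 * OTHER  */
    }
    return TT_SPILL | ((wstate & 0x7) << 2);                  /* 4 * NORMAL */
}
#endif
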
static target_ulong get_ccr(void)
{
    target_ulong psr;

    psr = get_psr();

    return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
}

target_ulong cpu_get_ccr(CPUState *env1)
{
    CPUState *saved_env;
    target_ulong ret;

    saved_env = env;
    env = env1;
    ret = get_ccr();
    env = saved_env;
    return ret;
}

static void put_ccr(target_ulong val)
{
    target_ulong tmp = val;

    env->xcc = (tmp >> 4) << 20;
    env->psr = (tmp & 0xf) << 20;
    CC_OP = CC_OP_FLAGS;
}

void cpu_put_ccr(CPUState *env1, target_ulong val)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    put_ccr(val);
    env = saved_env;
}

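/* Illustrative sketch (guarded out): the architectural CCR keeps xcc in
   bits 7:4 and icc in bits 3:0.  get_ccr()/put_ccr() above convert between
   that layout and the internal representation, which stores both 4-bit
   groups at bit 20 (the V8 PSR_ICC position). */
#if 0
static target_ulong example_pack_ccr(uint32_t xcc_bits, uint32_t icc_bits)
{
    return ((xcc_bits & 0xf) << 4) | (icc_bits & 0xf);
}
#endif
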
static target_ulong get_cwp64(void)
{
    return env->nwindows - 1 - env->cwp;
}

target_ulong cpu_get_cwp64(CPUState *env1)
{
    CPUState *saved_env;
    target_ulong ret;

    saved_env = env;
    env = env1;
    ret = get_cwp64();
    env = saved_env;
    return ret;
}

static void put_cwp64(int cwp)
{
    if (unlikely(cwp >= env->nwindows || cwp < 0)) {
        cwp %= env->nwindows;
    }
    set_cwp(env->nwindows - 1 - cwp);
}

void cpu_put_cwp64(CPUState *env1, int cwp)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    put_cwp64(cwp);
    env = saved_env;
}

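/* Illustrative sketch (guarded out): V9 numbers register windows in the
   opposite direction from the internal V8-style CWP, so the conversion in
   get_cwp64()/put_cwp64() is simply cwp_v9 = nwindows - 1 - cwp_v8 (and
   back).  With nwindows = 8, internal CWP 0 reads as 7 through %cwp. */
#if 0
static int example_cwp_v9(int nwindows, int cwp_v8)
{
    return nwindows - 1 - cwp_v8;
}
#endif
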
target_ulong helper_rdccr(void)
{
    return get_ccr();
}

void helper_wrccr(target_ulong new_ccr)
{
    put_ccr(new_ccr);
}

// CWP handling is reversed in V9, but we still use the V8 register
// order.
target_ulong helper_rdcwp(void)
{
    return get_cwp64();
}

void helper_wrcwp(target_ulong new_cwp)
{
    put_cwp64(new_cwp);
}

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))

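/* Illustrative sketch (guarded out): GET_FIELD counts bit 0 as the most
   significant bit of the 64-bit value, while GET_FIELD_SP uses the manual's
   numbering where bit 0 is 2^0.  For example, GET_FIELD_SP(0xABCD, 4, 7)
   extracts bits 7..4 and yields 0xC, i.e. (0xABCD >> 4) & 0xf. */
#if 0
static uint64_t example_get_field_sp(void)
{
    return GET_FIELD_SP(0xABCDULL, 4, 7);   /* == 0xC */
}
#endif
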
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}

target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
{
    uint64_t tmp;

    tmp = addr + offset;
    env->gsr &= ~7ULL;
    env->gsr |= tmp & 7ULL;
    return tmp & ~7ULL;
}

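/* Illustrative sketch (guarded out): alignaddr returns the sum rounded down
   to an 8-byte boundary and latches the discarded low three bits in
   GSR.align, e.g. addr 0x1001 + offset 3 gives 0x1000 with GSR.align = 4.
   Those low bits are what the subsequent faligndata-style byte shuffling
   consumes. */
#if 0
static uint64_t example_alignaddr(uint64_t addr, uint64_t offset,
                                  uint64_t *gsr_align)
{
    uint64_t tmp = addr + offset;

    *gsr_align = tmp & 7ULL;
    return tmp & ~7ULL;
}
#endif
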
target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}

static inline uint64_t *get_gregset(uint32_t pstate)
{
    switch (pstate) {
    default:
        DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
                pstate,
                (pstate & PS_IG) ? " IG" : "",
                (pstate & PS_MG) ? " MG" : "",
                (pstate & PS_AG) ? " AG" : "");
        /* pass through to normal set of global registers */
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}

static inline void change_pstate(uint32_t new_pstate)
{
    uint32_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    if (env->def->features & CPU_FEATURE_GL) {
        // PS_AG is not implemented in this case
        new_pstate &= ~PS_AG;
    }

    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;

    if (new_pstate_regs != pstate_regs) {
        DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
                       pstate_regs, new_pstate_regs);
        // Switch global register bank
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    } else {
        DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
                       new_pstate_regs);
    }
    env->pstate = new_pstate;
}

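/* Illustrative sketch (guarded out): change_pstate() only cares about the
   three global-set selector bits, PS_AG | PS_MG | PS_IG (the 0xc01 mask
   above).  When that selection changes, the live %g registers are parked in
   the bank belonging to the old selection and the new bank is copied in, so
   env->gregs always holds the currently visible globals. */
#if 0
static uint32_t example_greg_selector(uint32_t pstate)
{
    return pstate & (PS_AG | PS_MG | PS_IG);    /* == pstate & 0xc01 */
}
#endif
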
void helper_wrpstate(target_ulong new_state)
{
    change_pstate(new_state & 0xf3f);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

void helper_wrpil(target_ulong new_pil)
{
#if !defined(CONFIG_USER_ONLY)
    DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
                   env->psrpil, (uint32_t)new_pil);

    env->psrpil = new_pil;

    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

void helper_done(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tnpc;
    env->npc = tsptr->tnpc + 4;
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

void helper_retry(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tpc;
    env->npc = tsptr->tnpc;
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

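/* Illustrative sketch (guarded out): DONE and RETRY above unpack the TSTATE
   word that do_interrupt() builds further down, laid out roughly as CCR in
   bits 39:32, ASI in 31:24, PSTATE in 19:8 and CWP in the low bits (read
   back here as the low byte). */
#if 0
static uint64_t example_pack_tstate(uint64_t ccr, uint64_t asi,
                                    uint64_t pstate, uint64_t cwp)
{
    return (ccr << 32) | ((asi & 0xff) << 24) |
           ((pstate & 0xf3f) << 8) | (cwp & 0xff);
}
#endif
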
static void do_modify_softint(const char* operation, uint32_t value)
{
    if (env->softint != value) {
        env->softint = value;
        DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
#if !defined(CONFIG_USER_ONLY)
        if (cpu_interrupts_enabled(env)) {
            cpu_check_irqs(env);
        }
#endif
    }
}

void helper_set_softint(uint64_t value)
{
    do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
}

void helper_clear_softint(uint64_t value)
{
    do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
}

void helper_write_softint(uint64_t value)
{
    do_modify_softint("helper_write_softint", (uint32_t)value);
}
#endif

void helper_flush(target_ulong addr)
{
    addr &= ~7;
    tb_invalidate_page_range(addr, addr + 8);
}

#ifdef TARGET_SPARC64
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_TMISS] = "Instruction Access MMU Miss",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_TOVF] = "Tag Overflow",
    [TT_CLRWIN] = "Clean Windows",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_DFAULT] = "Data Access Fault",
    [TT_DMISS] = "Data Access MMU Miss",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DPROT] = "Data Protection Error",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_PRIV_ACT] = "Privileged Action",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
};
#endif

trap_state* cpu_tsptr(CPUState* env)
{
    return &env->ts[env->tl & MAXTL_MASK];
}

void do_interrupt(CPUState *env)
{
    int intno = env->exception_index;
    trap_state* tsptr;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x180)
            name = "Unknown";
        else if (intno >= 0x100)
            name = "Trap Instruction";
        else if (intno >= 0xc0)
            name = "Window Fill";
        else if (intno >= 0x80)
            name = "Window Spill";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
                " SP=%016" PRIx64 "\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->tl >= env->maxtl) {
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", env->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl)
            env->tl++;
    }
    tsptr = cpu_tsptr(env);

    tsptr->tstate = (get_ccr() << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        get_cwp64();
    tsptr->tpc = env->pc;
    tsptr->tnpc = env->npc;
    tsptr->tt = intno;

    switch (intno) {
    case TT_IVEC:
        change_pstate(PS_PEF | PS_PRIV | PS_IG);
        break;
    case TT_TFAULT:
    case TT_DFAULT:
    case TT_TMISS ... TT_TMISS + 3:
    case TT_DMISS ... TT_DMISS + 3:
    case TT_DPROT ... TT_DPROT + 3:
        change_pstate(PS_PEF | PS_PRIV | PS_MG);
        break;
    default:
        change_pstate(PS_PEF | PS_PRIV | PS_AG);
        break;
    }

    if (intno == TT_CLRWIN) {
        set_cwp(cwp_dec(env->cwp - 1));
    } else if ((intno & 0x1c0) == TT_SPILL) {
        set_cwp(cwp_dec(env->cwp - env->cansave - 2));
    } else if ((intno & 0x1c0) == TT_FILL) {
        set_cwp(cwp_inc(env->cwp + 1));
    }
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;
}
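
/* Illustrative sketch (guarded out): the trap vector computed above is the
   trap base address with bit 14 selecting the table used when the trap was
   taken from TL > 0, and the trap type scaled by 32 (each V9 trap table
   entry is 8 instructions long). */
#if 0
static uint64_t example_v9_trap_vector(uint64_t tbr, int intno, int tl_after)
{
    return (tbr & ~0x7fffULL) |
           ((tl_after > 1) ? (1 << 14) : 0) |
           ((uint64_t)intno << 5);
}
#endif
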
#else
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_WIN_OVF] = "Window Overflow",
    [TT_WIN_UNF] = "Window Underflow",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_DFAULT] = "Data Access Fault",
    [TT_TOVF] = "Tag Overflow",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_NCP_INSN] = "Coprocessor Disabled",
};
#endif

void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x100)
            name = "Unknown";
        else if (intno >= 0x80)
            name = "Trap Instruction";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->psret == 0) {
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
                  env->exception_index);
        return;
    }
#endif
    env->psret = 0;
    cwp = cwp_dec(env->cwp - 1);
    set_cwp(cwp);
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;
    env->psrs = 1;
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;
}
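
/* Illustrative sketch (guarded out): V8 trap entry above rotates into the
   next register window, saves pc/npc in the new window's %l1/%l2
   (regwptr[9] and regwptr[10]) and jumps to TBR with the trap type in
   bits 11:4, so each trap table entry is 4 instructions long. */
#if 0
static uint32_t example_v8_trap_vector(uint32_t tbr, int intno)
{
    return (tbr & TBR_BASE_MASK) | ((uint32_t)intno << 4);
}
#endif
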
#endif

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* XXX: make it generic ? */
static void cpu_restore_state2(void *retaddr)
{
    TranslationBlock *tb;
    unsigned long pc;

    if (retaddr) {
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
        if (tb) {
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
        }
    }
}

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        cpu_restore_state2(retaddr);
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif /* !CONFIG_USER_ONLY */

#ifndef TARGET_SPARC64
#if !defined(CONFIG_USER_ONLY)
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;
    int fault_type;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
    /* Don't overwrite translation and access faults */
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
    if ((fault_type > 4) || (fault_type == 0)) {
        env->mmuregs[3] = 0; /* Fault status register */
        if (is_asi)
            env->mmuregs[3] |= 1 << 16;
        if (env->psrs)
            env->mmuregs[3] |= 1 << 5;
        if (is_exec)
            env->mmuregs[3] |= 1 << 6;
        if (is_write)
            env->mmuregs[3] |= 1 << 7;
        env->mmuregs[3] |= (5 << 2) | 2;
        /* SuperSPARC will never place instruction fault addresses in the FAR */
        if (!is_exec) {
            env->mmuregs[4] = addr; /* Fault address register */
        }
    }
    /* overflow (same type fault was not read before another fault) */
    if (fault_type == (env->mmuregs[3] & 0x1c) >> 2) {
        env->mmuregs[3] |= 1;
    }

    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }

    /* flush neverland mappings created during no-fault mode,
       so that subsequent MMU faults report proper fault types */
    if (env->mmuregs[0] & MMU_NF) {
        tlb_flush(env, 1);
    }

    env = saved_env;
}
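
/* Illustrative sketch (guarded out): the fault status word assembled above
   keeps the fault type in bits 4:2 (set to 5 here), the access type in
   bits 7:5 (supervisor/exec/write), a fault-address-valid flag in bit 1 and
   the overflow flag in bit 0, plus bit 16 for ASI accesses.  This only
   mirrors the bit positions used by the code, not a full SRMMU FSR
   description. */
#if 0
static uint32_t example_fault_status(int is_asi, int supervisor,
                                     int is_exec, int is_write)
{
    uint32_t fsr = (5 << 2) | 2;            /* fault type 5, address valid */

    if (is_asi)
        fsr |= 1 << 16;
    if (supervisor)
        fsr |= 1 << 5;
    if (is_exec)
        fsr |= 1 << 6;
    if (is_write)
        fsr |= 1 << 7;
    return fsr;
}
#endif
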
#endif
#else
#if defined(CONFIG_USER_ONLY)
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                          int is_asi, int size)
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
#endif
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
#endif

    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);

    env = saved_env;
}
#endif


#ifdef TARGET_SPARC64
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}

uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}

void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
#endif