target-alpha/op_helper.c @ 2d8ee4e7

/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"

#include "op_helper.h"

#define MEMSUFFIX _raw
#include "op_helper_mem.h"

#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _kernel
#include "op_helper_mem.h"

#define MEMSUFFIX _executive
#include "op_helper_mem.h"

#define MEMSUFFIX _supervisor
#include "op_helper_mem.h"

#define MEMSUFFIX _user
#include "op_helper_mem.h"

/* This is used for pal modes */
#define MEMSUFFIX _data
#include "op_helper_mem.h"
#endif

void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}

void cpu_dump_EA (target_ulong EA);
void helper_print_mem_EA (target_ulong EA)
{
    cpu_dump_EA(EA);
}

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (uint32_t excp, uint32_t error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

void helper_amask (void)
{
    switch (env->implver) {
    case IMPLVER_2106x:
        /* EV4, EV45, LCA, LCA45 & EV5 */
        break;
    case IMPLVER_21164:
    case IMPLVER_21264:
    case IMPLVER_21364:
        T0 &= ~env->amask;
        break;
    }
}

void helper_load_pcc (void)
{
    /* XXX: TODO */
    T0 = 0;
}

void helper_load_implver (void)
{
    T0 = env->implver;
}
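
/* Build the FPCR image in T0: the rounding mode goes in the dynamic rounding
   field (bits 59:58); with CONFIG_SOFTFLOAT the accumulated exception flags
   are reported from bit 52 upwards and set the summary bit (bit 63). */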
void helper_load_fpcr (void)
{
    T0 = 0;
#ifdef CONFIG_SOFTFLOAT
    T0 |= env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        T0 |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        T0 |= 2ULL << 58;
        break;
    case float_round_down:
        T0 |= 1ULL << 58;
        break;
    case float_round_up:
        T0 |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
}

void helper_store_fpcr (void)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((T0 >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}

void helper_load_irf (void)
{
    /* XXX: TODO */
    T0 = 0;
}

void helper_set_irf (void)
{
    /* XXX: TODO */
}

void helper_clear_irf (void)
{
    /* XXX: TODO */
}
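
/* The *V arithmetic helpers below detect signed overflow from the sign bits:
   overflow happened when both operands had the same sign (after negating the
   subtrahend for subtraction) and the result's sign differs from theirs. */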
void helper_addqv (void)
{
    T2 = T0;
    T0 += T1;
    if (unlikely((T2 ^ T1 ^ (-1ULL)) & (T2 ^ T0) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_addlv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 + T1);
    if (unlikely((T2 ^ T1 ^ (-1UL)) & (T2 ^ T0) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_subqv (void)
{
    T2 = T0;
    T0 -= T1;
    if (unlikely(((~T2) ^ T0 ^ (-1ULL)) & ((~T2) ^ T1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_sublv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 - T1);
    if (unlikely(((~T2) ^ T0 ^ (-1UL)) & ((~T2) ^ T1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_mullv (void)
{
    int64_t res = (int64_t)T0 * (int64_t)T1;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    T0 = (int64_t)((int32_t)res);
}

void helper_mulqv ()
{
    uint64_t tl, th;

    muls64(&tl, &th, T0, T1);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    T0 = tl;
}

void helper_ctpop (void)
{
    T0 = ctpop64(T0);
}

void helper_ctlz (void)
{
    T0 = clz64(T0);
}

void helper_cttz (void)
{
    T0 = ctz64(T0);
}
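
/* byte_zap builds a byte-granular mask from mskb and clears the selected
   bytes of op: each set bit in mskb zeroes the corresponding byte of the
   result.  The MSK/EXT/INS/ZAP helpers below are all built on top of it. */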
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

void helper_mskbl (void)
{
    T0 = byte_zap(T0, 0x01 << (T1 & 7));
}

void helper_extbl (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFE);
}

void helper_insbl (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x01 << (T1 & 7)));
}

void helper_mskwl (void)
{
    T0 = byte_zap(T0, 0x03 << (T1 & 7));
}

void helper_extwl (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFC);
}

void helper_inswl (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x03 << (T1 & 7)));
}

void helper_mskll (void)
{
    T0 = byte_zap(T0, 0x0F << (T1 & 7));
}

void helper_extll (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xF0);
}

void helper_insll (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x0F << (T1 & 7)));
}

void helper_zap (void)
{
    T0 = byte_zap(T0, T1);
}

void helper_zapnot (void)
{
    T0 = byte_zap(T0, ~T1);
}

void helper_mskql (void)
{
    T0 = byte_zap(T0, 0xFF << (T1 & 7));
}

void helper_extql (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0x00);
}

void helper_insql (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0xFF << (T1 & 7)));
}

void helper_mskwh (void)
{
    T0 = byte_zap(T0, (0x03 << (T1 & 7)) >> 8);
}

void helper_inswh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0x03 << (T1 & 7)) >> 8));
}

void helper_extwh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~0x07);
}

void helper_msklh (void)
{
    T0 = byte_zap(T0, (0x0F << (T1 & 7)) >> 8);
}

void helper_inslh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0x0F << (T1 & 7)) >> 8));
}

void helper_extlh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~0x0F);
}

void helper_mskqh (void)
{
    T0 = byte_zap(T0, (0xFF << (T1 & 7)) >> 8);
}

void helper_insqh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0xFF << (T1 & 7)) >> 8));
}

void helper_extqh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, 0x00);
}
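
/* CMPBGE compares the eight bytes of T0 and T1 as unsigned values and sets
   one result bit per byte position where T0's byte is greater or equal. */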
void helper_cmpbge (void)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = T0 >> (i * 8);
        opb = T1 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    T0 = res;
}

void helper_cmov_fir (int freg)
{
    if (FT0 != 0)
        env->fir[freg] = FT1;
}

void helper_sqrts (void)
{
    FT0 = float32_sqrt(FT0, &FP_STATUS);
}
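
/* CPYS, CPYSN and CPYSE combine the two inputs bitwise: the sign bit (or its
   complement, or the whole sign-and-exponent field) is taken from FT0 and the
   remaining bits from FT1. */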
void helper_cpys (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpysn (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = (~p.i) & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpyse (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0xFFF0000000000000ULL;
    r.i |= q.i & ~0xFFF0000000000000ULL;
    FT0 = r.d;
}

void helper_itofs (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float32(p.i, &FP_STATUS);
}

void helper_ftois (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float32_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}

void helper_sqrtt (void)
{
    FT0 = float64_sqrt(FT0, &FP_STATUS);
}
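
/* The IEEE compare helpers below return 2.0 (0x4000000000000000) for a true
   condition and +0.0 for false, which is the result encoding the Alpha
   CMPTxx instructions expect. */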
void helper_cmptun (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_is_nan(FT0) || float64_is_nan(FT1))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpteq (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_eq(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptle (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_le(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptlt (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_lt(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_itoft (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float64(p.i, &FP_STATUS);
}

void helper_ftoit (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float64_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}
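
/* VAX F-format support: operands are converted to IEEE single precision,
   computed with softfloat and converted back.  The conversion is a simple
   rescaling, since the same bit pattern read as VAX F versus IEEE single
   differs by a factor of four; reserved operands are only detected, not yet
   signalled. */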
static always_inline int vaxf_is_valid (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}

static always_inline float vaxf_to_ieee32 (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}

static always_inline float ieee32_to_vaxf (float fi)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = fi;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 255) {
        /* NaN or infinity */
        p.i = 1;
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            p.i = 1;
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}

void helper_addf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_subf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_mulf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_divf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_sqrtf (void)
{
    float ft0, ft1;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = float32_sqrt(ft0, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft1);
}

void helper_itoff (void)
{
    /* XXX: TODO */
}

    
685
static always_inline int vaxg_is_valid (double ff)
686
{
687
    union {
688
        double f;
689
        uint64_t i;
690
    } p;
691
    uint64_t exp, mant;
692

    
693
    p.f = ff;
694
    exp = (p.i >> 52) & 0x7FF;
695
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
696
    if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
697
        /* Reserved operands / Dirty zero */
698
        return 0;
699
    }
700

    
701
    return 1;
702
}
703

    
704
static always_inline double vaxg_to_ieee64 (double fg)
705
{
706
    union {
707
        double f;
708
        uint64_t i;
709
    } p;
710
    uint32_t exp;
711

    
712
    p.f = fg;
713
    exp = (p.i >> 52) & 0x7FF;
714
    if (exp < 3) {
715
        /* Underflow */
716
        p.f = 0.0;
717
    } else {
718
        p.f *= 0.25;
719
    }
720

    
721
    return p.f;
722
}
723

    
724
static always_inline double ieee64_to_vaxg (double fi)
725
{
726
    union {
727
        double f;
728
        uint64_t i;
729
    } p;
730
    uint64_t mant;
731
    uint32_t exp;
732

    
733
    p.f = fi;
734
    exp = (p.i >> 52) & 0x7FF;
735
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
736
    if (exp == 255) {
737
        /* NaN or infinity */
738
        p.i = 1; /* VAX dirty zero */
739
    } else if (exp == 0) {
740
        if (mant == 0) {
741
            /* Zero */
742
            p.i = 0;
743
        } else {
744
            /* Denormalized */
745
            p.f *= 2.0;
746
        }
747
    } else {
748
        if (exp >= 2045) {
749
            /* Overflow */
750
            p.i = 1; /* VAX dirty zero */
751
        } else {
752
            p.f *= 4.0;
753
        }
754
    }
755

    
756
    return p.f;
757
}

void helper_addg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_subg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_mulg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_divg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_sqrtg (void)
{
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = float64_sqrt(ft0, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft1);
}

void helper_cmpgeq (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_eq(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpglt (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_lt(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpgle (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_le(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cvtqs (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = (float)p.u;
}

void helper_cvttq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = FT0;
    FT0 = p.d;
}

void helper_cvtqt (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = p.u;
}

void helper_cvtqf (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee32_to_vaxf(p.u);
}

void helper_cvtgf (void)
{
    double ft0;

    ft0 = vaxg_to_ieee64(FT0);
    FT0 = ieee32_to_vaxf(ft0);
}

void helper_cvtgd (void)
{
    /* XXX: TODO */
}

void helper_cvtgq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = vaxg_to_ieee64(FT0);
    FT0 = p.d;
}

void helper_cvtqg (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee64_to_vaxg(p.u);
}

void helper_cvtdg (void)
{
    /* XXX: TODO */
}
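
/* CVTLQ and CVTQL move a 32-bit value between its sign-extended longword
   form and the split layout Alpha uses for longwords held in FP registers,
   where the value occupies the upper bits of the register rather than
   bits 31:0. */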
void helper_cvtlq (void)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = (p.u >> 29) & 0x3FFFFFFF;
    q.u |= (p.u >> 32);
    q.u = (int64_t)((int32_t)q.u);
    FT0 = q.d;
}

static always_inline void __helper_cvtql (int s, int v)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
    q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
    FT0 = q.d;
    if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
}

void helper_cvtql (void)
{
    __helper_cvtql(0, 0);
}

void helper_cvtqlv (void)
{
    __helper_cvtql(0, 1);
}

void helper_cvtqlsv (void)
{
    __helper_cvtql(1, 1);
}

void helper_cmpfeq (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfne (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

void helper_cmpflt (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfle (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfgt (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

void helper_cmpfge (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

#if !defined (CONFIG_USER_ONLY)
void helper_mfpr (int iprn)
{
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
}

void helper_mtpr (int iprn)
{
    cpu_alpha_mtpr(env, iprn, T0, NULL);
}
#endif

#if defined(HOST_SPARC) || defined(HOST_SPARC64)
void helper_reset_FT0 (void)
{
    FT0 = 0;
}

void helper_reset_FT1 (void)
{
    FT1 = 0;
}

void helper_reset_FT2 (void)
{
    FT2 = 0;
}
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)
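
/* GETPC() recovers the host return address inside a memory helper; tlb_fill()
   uses it to find the translation block and restore the guest CPU state
   before raising a fault. */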
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

/* XXX: the two following helpers are pure hacks.
 *      Hopefully, we emulate the PALcode, then we should never see
 *      HW_LD / HW_ST instructions.
 */
void helper_ld_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}

void helper_st_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif