root / target-alpha / op_helper.c @ f071b4d3

/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "exec.h"
#include "softfloat.h"

#include "op_helper.h"

#define MEMSUFFIX _raw
#include "op_helper_mem.h"

#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _kernel
#include "op_helper_mem.h"

#define MEMSUFFIX _executive
#include "op_helper_mem.h"

#define MEMSUFFIX _supervisor
#include "op_helper_mem.h"

#define MEMSUFFIX _user
#include "op_helper_mem.h"

/* This is used for pal modes */
#define MEMSUFFIX _data
#include "op_helper_mem.h"
#endif
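
/* Added note (not in the original source): op_helper_mem.h is designed to be
 * included once per access mode.  Each inclusion defines MEMSUFFIX first so
 * the header can paste it onto the generated load/store helper names (a
 * _kernel or _user flavour of each accessor, for example); the header is
 * expected to #undef MEMSUFFIX again before the next inclusion.  The _data
 * suffix covers accesses made while running in PAL mode. */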

void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}

void cpu_dump_EA (target_ulong EA);
void helper_print_mem_EA (target_ulong EA)
{
    cpu_dump_EA(EA);
}

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (uint32_t excp, uint32_t error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

void helper_amask (void)
{
    switch (env->implver) {
    case IMPLVER_2106x:
        /* EV4, EV45, LCA, LCA45 & EV5 */
        break;
    case IMPLVER_21164:
    case IMPLVER_21264:
    case IMPLVER_21364:
        T0 &= ~env->amask;
        break;
    }
}

void helper_load_pcc (void)
{
    /* XXX: TODO */
    T0 = 0;
}

void helper_load_implver (void)
{
    T0 = env->implver;
}

void helper_load_fpcr (void)
{
    T0 = 0;
#ifdef CONFIG_SOFTFLOAT
    T0 |= env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        T0 |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        T0 |= 2ULL << 58;
        break;
    case float_round_down:
        T0 |= 1ULL << 58;
        break;
    case float_round_up:
        T0 |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
}

void helper_store_fpcr (void)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((T0 >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}
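
/* Added note (not in the original source): helper_load_fpcr/helper_store_fpcr
 * map the FPCR dynamic rounding mode field (bits 59:58) onto softfloat
 * rounding modes using the Alpha encoding: 0 = chopped (toward zero),
 * 1 = toward minus infinity, 2 = normal (nearest even), 3 = toward plus
 * infinity.  With CONFIG_SOFTFLOAT, the accrued softfloat exception flags are
 * additionally exposed at bits 57:52, with bit 63 acting as a summary bit. */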

void helper_load_irf (void)
{
    /* XXX: TODO */
    T0 = 0;
}

void helper_set_irf (void)
{
    /* XXX: TODO */
}

void helper_clear_irf (void)
{
    /* XXX: TODO */
}

void helper_addqv (void)
{
    T2 = T0;
    T0 += T1;
    if (unlikely((T2 ^ T1 ^ (-1ULL)) & (T2 ^ T0) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
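
/* Added note (not in the original source): the *v helpers detect signed
 * overflow with the usual two's complement identity.  For an addition
 * r = a + b, overflow occurred exactly when both operands have the same sign
 * and the result's sign differs, i.e. when
 *     (a ^ b ^ -1) & (a ^ r)
 * has its top bit set; for a subtraction r = a - b the operands must have
 * opposite signs and the result's sign must differ from a's, i.e.
 *     (a ^ b) & (a ^ r)
 * has its top bit set.  The 32-bit variants test bit 31 of the zero-extended
 * result instead of bit 63. */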

void helper_addlv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 + T1);
    if (unlikely((T2 ^ T1 ^ (-1UL)) & (T2 ^ T0) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_subqv (void)
{
    T2 = T0;
    T0 -= T1;
    if (unlikely((T2 ^ T1) & (T2 ^ T0) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_sublv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 - T1);
    if (unlikely((T2 ^ T1) & (T2 ^ T0) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_mullv (void)
{
    int64_t res = (int64_t)T0 * (int64_t)T1;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    T0 = (int64_t)((int32_t)res);
}

void helper_mulqv (void)
{
    uint64_t tl, th;

    muls64(&tl, &th, T0, T1);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    T0 = tl;
}
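
/* Added note (not in the original source): muls64() produces the full 128-bit
 * signed product in tl (low half) and th (high half).  The check above flags
 * an overflow whenever th is neither 0 nor -1, using the unsigned trick
 * (th + 1) > 1.  Strictly, a 64-bit signed multiply overflows unless th is
 * the sign extension of tl (th == (int64_t)tl >> 63), so this test can miss
 * products whose low half has bit 63 set. */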

void helper_ctpop (void)
{
    int n;

    for (n = 0; T0 != 0; n++)
        T0 &= T0 - 1;
    T0 = n;
}
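
/* Added note (not in the original source): the loop above counts set bits by
 * repeatedly clearing the lowest set bit (x & (x - 1) drops exactly one bit
 * per iteration), so the number of iterations is the population count. */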

void helper_ctlz (void)
{
    uint32_t op32;
    int n;

    n = 0;
    if (!(T0 & 0xFFFFFFFF00000000ULL)) {
        n += 32;
        T0 <<= 32;
    }
    /* Make it easier for 32-bit hosts */
    op32 = T0 >> 32;
    if (!(op32 & 0xFFFF0000UL)) {
        n += 16;
        op32 <<= 16;
    }
    if (!(op32 & 0xFF000000UL)) {
        n += 8;
        op32 <<= 8;
    }
    if (!(op32 & 0xF0000000UL)) {
        n += 4;
        op32 <<= 4;
    }
    if (!(op32 & 0xC0000000UL)) {
        n += 2;
        op32 <<= 2;
    }
    if (!(op32 & 0x80000000UL)) {
        n++;
        op32 <<= 1;
    }
    if (!(op32 & 0x80000000UL)) {
        n++;
    }
    T0 = n;
}

void helper_cttz (void)
{
    uint32_t op32;
    int n;

    n = 0;
    if (!(T0 & 0x00000000FFFFFFFFULL)) {
        n += 32;
        T0 >>= 32;
    }
    /* Make it easier for 32-bit hosts */
    op32 = T0;
    if (!(op32 & 0x0000FFFFUL)) {
        n += 16;
        op32 >>= 16;
    }
    if (!(op32 & 0x000000FFUL)) {
        n += 8;
        op32 >>= 8;
    }
    if (!(op32 & 0x0000000FUL)) {
        n += 4;
        op32 >>= 4;
    }
    if (!(op32 & 0x00000003UL)) {
        n += 2;
        op32 >>= 2;
    }
    if (!(op32 & 0x00000001UL)) {
        n++;
        op32 >>= 1;
    }
    if (!(op32 & 0x00000001UL)) {
        n++;
    }
    T0 = n;
}
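
/* Added note (not in the original source): helper_ctlz and helper_cttz use a
 * branchy binary search: each test halves the window that can still contain
 * the first set bit, adding 32, 16, 8, 4, 2 or 1 to the count when the upper
 * (resp. lower) half is all zero.  The final duplicated single-bit test makes
 * an all-zero input come out as 64, matching the Alpha CTLZ/CTTZ definition. */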

static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
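
/* Added note (not in the original source): byte_zap() clears every byte of
 * op whose corresponding bit is set in the 8-bit mask mskb, e.g.
 * byte_zap(0x1122334455667788ULL, 0x01) == 0x1122334455667700ULL.  The
 * multiply-by-0xFF trick expands each mask bit into a full byte of ones
 * without branching. */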

void helper_mskbl (void)
{
    T0 = byte_zap(T0, 0x01 << (T1 & 7));
}

void helper_extbl (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFE);
}

void helper_insbl (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x01 << (T1 & 7)));
}

void helper_mskwl (void)
{
    T0 = byte_zap(T0, 0x03 << (T1 & 7));
}

void helper_extwl (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFC);
}

void helper_inswl (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x03 << (T1 & 7)));
}

void helper_mskll (void)
{
    T0 = byte_zap(T0, 0x0F << (T1 & 7));
}

void helper_extll (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xF0);
}

void helper_insll (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x0F << (T1 & 7)));
}

void helper_zap (void)
{
    T0 = byte_zap(T0, T1);
}

void helper_zapnot (void)
{
    T0 = byte_zap(T0, ~T1);
}

void helper_mskql (void)
{
    T0 = byte_zap(T0, 0xFF << (T1 & 7));
}

void helper_extql (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0x00);
}

void helper_insql (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0xFF << (T1 & 7)));
}

void helper_mskwh (void)
{
    T0 = byte_zap(T0, (0x03 << (T1 & 7)) >> 8);
}

void helper_inswh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0x03 << (T1 & 7)) >> 8));
}

void helper_extwh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~0x07);
}

void helper_msklh (void)
{
    T0 = byte_zap(T0, (0x0F << (T1 & 7)) >> 8);
}

void helper_inslh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0x0F << (T1 & 7)) >> 8));
}

void helper_extlh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~0x0F);
}

void helper_mskqh (void)
{
    T0 = byte_zap(T0, (0xFF << (T1 & 7)) >> 8);
}

void helper_insqh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0xFF << (T1 & 7)) >> 8));
}

void helper_extqh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, 0x00);
}
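
/* Added note (not in the original source): the helpers above implement the
 * Alpha byte-manipulation instructions in terms of byte_zap().  MSKxx clears
 * the bytes selected by the (shifted) width mask, EXTxx shifts the wanted
 * field down and then zaps the bytes outside the field width, and INSxx
 * shifts the field into position and zaps everything else.  The xx_h ("high")
 * forms handle the part of an unaligned field that spills into the next
 * quadword, hence the 64 - 8 * (T1 & 7) shifts and the >> 8 applied to the
 * byte masks. */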

void helper_cmpbge (void)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = T0 >> (i * 8);
        opb = T1 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    T0 = res;
}
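
/* Added note (not in the original source): CMPBGE compares the eight byte
 * lanes of T0 and T1 as unsigned values and sets one result bit per lane
 * where T0's byte is greater than or equal to T1's byte; guest code relies
 * on it for fast string primitives such as strlen and memchr. */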

void helper_cmov_fir (int freg)
{
    if (FT0 != 0)
        env->fir[freg] = FT1;
}

void helper_sqrts (void)
{
    FT0 = float32_sqrt(FT0, &FP_STATUS);
}

void helper_cpys (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpysn (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = (~p.i) & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpyse (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0xFFF0000000000000ULL;
    r.i |= q.i & ~0xFFF0000000000000ULL;
    FT0 = r.d;
}

void helper_itofs (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float32(p.i, &FP_STATUS);
}

void helper_ftois (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float32_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}

void helper_sqrtt (void)
{
    FT0 = float64_sqrt(FT0, &FP_STATUS);
}

void helper_cmptun (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_is_nan(FT0) || float64_is_nan(FT1))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpteq (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_eq(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptle (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_le(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptlt (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_lt(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_itoft (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float64(p.i, &FP_STATUS);
}

void helper_ftoit (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float64_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}

static always_inline int vaxf_is_valid (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}

static always_inline float vaxf_to_ieee32 (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}

static always_inline float ieee32_to_vaxf (float fi)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = fi;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 255) {
        /* NaN or infinity */
        p.i = 1;
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            p.i = 1;
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}
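
/* Added note (not in the original source): the vaxf_* routines above convert
 * between VAX F-format and IEEE single precision by reinterpreting the bit
 * pattern and then rescaling by 4 (or 0.25 in the other direction), which
 * compensates for the different exponent bias and significand normalisation
 * of the two formats.  Values F-format cannot represent (IEEE NaN, infinity,
 * overflow) are collapsed to the dirty-zero/reserved pattern used elsewhere
 * in this file, tiny inputs simply underflow to zero, and raising a proper
 * exception for invalid VAX operands is still a TODO. */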

void helper_addf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_subf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_mulf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_divf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_sqrtf (void)
{
    float ft0, ft1;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = float32_sqrt(ft0, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft1);
}

void helper_itoff (void)
{
    /* XXX: TODO */
}

static always_inline int vaxg_is_valid (double ff)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint64_t exp, mant;

    p.f = ff;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}

static always_inline double vaxg_to_ieee64 (double fg)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint32_t exp;

    p.f = fg;
    exp = (p.i >> 52) & 0x7FF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}

static always_inline double ieee64_to_vaxg (double fi)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint64_t mant;
    uint32_t exp;

    p.f = fi;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 2047) {
        /* NaN or infinity */
        p.i = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            p.i = 1; /* VAX dirty zero */
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}

void helper_addg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_subg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_mulg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_divg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_sqrtg (void)
{
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = float64_sqrt(ft0, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft1);
}

void helper_cmpgeq (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_eq(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpglt (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_lt(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpgle (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_le(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cvtqs (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = (float)p.u;
}

void helper_cvttq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = FT0;
    FT0 = p.d;
}

void helper_cvtqt (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = p.u;
}

void helper_cvtqf (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee32_to_vaxf(p.u);
}

void helper_cvtgf (void)
{
    double ft0;

    ft0 = vaxg_to_ieee64(FT0);
    FT0 = ieee32_to_vaxf(ft0);
}

void helper_cvtgd (void)
{
    /* XXX: TODO */
}

void helper_cvtgq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = vaxg_to_ieee64(FT0);
    FT0 = p.d;
}

void helper_cvtqg (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee64_to_vaxg(p.u);
}

void helper_cvtdg (void)
{
    /* XXX: TODO */
}

void helper_cvtlq (void)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = (p.u >> 29) & 0x3FFFFFFF;
    q.u |= (p.u >> 32);
    q.u = (int64_t)((int32_t)q.u);
    FT0 = q.d;
}
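
/* Added note (not in the original source): Alpha keeps a 32-bit longword in a
 * floating point register in a scattered form (roughly, value bits <29:0> in
 * register bits <58:29> and value bits <31:30> in register bits <63:62>).
 * helper_cvtlq gathers those fields back into a sign-extended 64-bit integer,
 * and __helper_cvtql below performs the reverse packing, optionally raising
 * an arithmetic trap when the quadword does not fit in 32 bits (the /v
 * forms); the software-completion variant (/sv) is still a TODO. */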

static always_inline void __helper_cvtql (int s, int v)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
    q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
    FT0 = q.d;
    if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
}

void helper_cvtql (void)
{
    __helper_cvtql(0, 0);
}

void helper_cvtqlv (void)
{
    __helper_cvtql(0, 1);
}

void helper_cvtqlsv (void)
{
    __helper_cvtql(1, 1);
}

void helper_cmpfeq (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfne (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

void helper_cmpflt (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfle (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfgt (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

void helper_cmpfge (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

#if !defined (CONFIG_USER_ONLY)
void helper_mfpr (int iprn)
{
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
}

void helper_mtpr (int iprn)
{
    cpu_alpha_mtpr(env, iprn, T0, NULL);
}
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define GETPC() (__builtin_return_address(0))

/* XXX: the two following helpers are pure hacks.
 *      Once we emulate the PALcode properly, we should never see
 *      HW_LD / HW_ST instructions.
 */
void helper_ld_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}

void helper_st_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
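
/* Added note (not in the original source): softmmu_template.h is another
 * multiple-inclusion template; each SHIFT value (0..3) instantiates the
 * slow-path MMU load/store helpers for 1, 2, 4 and 8 byte accesses
 * (access size = 1 << SHIFT), using the MMUSUFFIX defined above to build
 * the generated function names. */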

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    target_phys_addr_t pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (target_phys_addr_t)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif