Statistics
| Branch: | Revision:

root / target-alpha / op_helper.c @ 6ad02592

History | View | Annotate | Download (21.9 kB)

1
/*
2
 *  Alpha emulation cpu micro-operations helpers for qemu.
3
 *
4
 *  Copyright (c) 2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20

    
21
#include "exec.h"
22
#include "host-utils.h"
23
#include "softfloat.h"
24

    
25
#include "op_helper.h"
26

    
27
#define MEMSUFFIX _raw
28
#include "op_helper_mem.h"
29

    
30
#if !defined(CONFIG_USER_ONLY)
31
#define MEMSUFFIX _kernel
32
#include "op_helper_mem.h"
33

    
34
#define MEMSUFFIX _executive
35
#include "op_helper_mem.h"
36

    
37
#define MEMSUFFIX _supervisor
38
#include "op_helper_mem.h"
39

    
40
#define MEMSUFFIX _user
41
#include "op_helper_mem.h"
42

    
43
/* This is used for pal modes */
44
#define MEMSUFFIX _data
45
#include "op_helper_mem.h"
46
#endif
47

    
48
/* Flush every entry of this CPU's soft TLB (the '1' also drops entries
 * for global pages).  */
void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}
52

    
53
/* Implemented elsewhere; dumps an effective address for debugging.  */
void cpu_dump_EA (target_ulong EA);
/* Debug helper: print the effective address of a memory access.  */
void helper_print_mem_EA (target_ulong EA)
{
    cpu_dump_EA(EA);
}
58

    
59
/*****************************************************************************/
60
/* Exceptions processing helpers */
61
/* Raise exception 'excp' with error code 'error': record both in the CPU
 * state and longjmp back to the main loop.  Does not return.  */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
67

    
68
uint64_t helper_amask (uint64_t arg)
69
{
70
    switch (env->implver) {
71
    case IMPLVER_2106x:
72
        /* EV4, EV45, LCA, LCA45 & EV5 */
73
        break;
74
    case IMPLVER_21164:
75
    case IMPLVER_21264:
76
    case IMPLVER_21364:
77
        arg &= ~env->amask;
78
        break;
79
    }
80
    return arg;
81
}
82

    
83
/* Read the processor cycle counter (PCC).  Not implemented yet: always 0.  */
uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}
88

    
89
/* IMPLVER: report the implementation version of the emulated CPU.  */
uint64_t helper_load_implver (void)
{
    return env->implver;
}
93

    
94
void helper_load_fpcr (void)
95
{
96
    T0 = 0;
97
#ifdef CONFIG_SOFTFLOAT
98
    T0 |= env->fp_status.float_exception_flags << 52;
99
    if (env->fp_status.float_exception_flags)
100
        T0 |= 1ULL << 63;
101
    env->ipr[IPR_EXC_SUM] &= ~0x3E:
102
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
103
#endif
104
    switch (env->fp_status.float_rounding_mode) {
105
    case float_round_nearest_even:
106
        T0 |= 2ULL << 58;
107
        break;
108
    case float_round_down:
109
        T0 |= 1ULL << 58;
110
        break;
111
    case float_round_up:
112
        T0 |= 3ULL << 58;
113
        break;
114
    case float_round_to_zero:
115
        break;
116
    }
117
}
118

    
119
void helper_store_fpcr (void)
120
{
121
#ifdef CONFIG_SOFTFLOAT
122
    set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
123
#endif
124
    switch ((T0 >> 58) & 3) {
125
    case 0:
126
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
127
        break;
128
    case 1:
129
        set_float_rounding_mode(float_round_down, &FP_STATUS);
130
        break;
131
    case 2:
132
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
133
        break;
134
    case 3:
135
        set_float_rounding_mode(float_round_up, &FP_STATUS);
136
        break;
137
    }
138
}
139

    
140
spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;
141

    
142
uint64_t helper_rs(void)
143
{
144
    uint64_t tmp;
145

    
146
    spin_lock(&intr_cpu_lock);
147
    tmp = env->intr_flag;
148
    env->intr_flag = 1;
149
    spin_unlock(&intr_cpu_lock);
150

    
151
    return tmp;
152
}
153

    
154
uint64_t helper_rc(void)
155
{
156
    uint64_t tmp;
157

    
158
    spin_lock(&intr_cpu_lock);
159
    tmp = env->intr_flag;
160
    env->intr_flag = 0;
161
    spin_unlock(&intr_cpu_lock);
162

    
163
    return tmp;
164
}
165

    
166
/* ADDQ/V: T0 <- T0 + T1, trapping on signed 64-bit overflow.  T2 keeps the
 * original T0; overflow iff the operands share a sign and the result's
 * sign differs.  */
void helper_addqv (void)
{
    T2 = T0;
    T0 += T1;
    if (unlikely((T2 ^ T1 ^ (-1ULL)) & (T2 ^ T0) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
174

    
175
/* ADDL/V: 32-bit add with overflow trap; the longword result is zero-
 * extended into T0 and the sign test is done on bit 31.  */
void helper_addlv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 + T1);
    if (unlikely((T2 ^ T1 ^ (-1UL)) & (T2 ^ T0) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
183

    
184
/* SUBQ/V: T0 <- T0 - T1, trapping on signed 64-bit overflow.  The test is
 * the addition-overflow predicate applied to ~T2 (i.e. -T2 - 1) and T1.  */
void helper_subqv (void)
{
    T2 = T0;
    T0 -= T1;
    if (unlikely(((~T2) ^ T0 ^ (-1ULL)) & ((~T2) ^ T1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
192

    
193
/* SUBL/V: 32-bit subtract with overflow trap; sign test on bit 31 of the
 * zero-extended longword result.  */
void helper_sublv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 - T1);
    if (unlikely(((~T2) ^ T0 ^ (-1UL)) & ((~T2) ^ T1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
201

    
202
/* MULL/V: 32-bit signed multiply with overflow trap.  Operands are assumed
 * to be in canonical sign-extended longword form; trap if the 64-bit
 * product does not fit in 32 bits, then sign-extend the low half into T0.  */
void helper_mullv (void)
{
    int64_t res = (int64_t)T0 * (int64_t)T1;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    T0 = (int64_t)((int32_t)res);
}
211

    
212
void helper_mulqv ()
213
{
214
    uint64_t tl, th;
215

    
216
    muls64(&tl, &th, T0, T1);
217
    /* If th != 0 && th != -1, then we had an overflow */
218
    if (unlikely((th + 1) > 1)) {
219
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
220
    }
221
    T0 = tl;
222
}
223

    
224
/* CTPOP: population count (number of set bits) of 'arg'.  */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}
228

    
229
/* CTLZ: count of leading zero bits of 'arg'.  */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}
233

    
234
/* CTTZ: count of trailing zero bits of 'arg'.  */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
238

    
239
/* Zero the bytes of 'op' selected by the 8-bit mask 'mskb': bit i of mskb
 * clears byte i (bit 0 = least significant byte).  */
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if ((mskb >> i) & 1)
            mask |= 0xFFULL << (i * 8);
    }

    return op & ~mask;
}
255

    
256
/* MSKBL: clear the byte of 'val' addressed by the low 3 bits of 'mask'.  */
uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    return byte_zap(val, 0x01 << byte_ofs);
}
260

    
261
/* INSBL: position the low byte of 'val' at the byte offset given by 'mask'
 * and clear every other byte.  */
uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    val <<= byte_ofs * 8;
    return byte_zap(val, ~(0x01 << byte_ofs));
}
266

    
267
/* MSKWL: clear the word (2 bytes) of 'val' at the byte offset in 'mask'.  */
uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    return byte_zap(val, 0x03 << byte_ofs);
}
271

    
272
/* INSWL: position the low word of 'val' at the byte offset in 'mask' and
 * clear the remaining bytes.  */
uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    val <<= byte_ofs * 8;
    return byte_zap(val, ~(0x03 << byte_ofs));
}
277

    
278
/* MSKLL: clear the longword (4 bytes) of 'val' at the byte offset in
 * 'mask'.  */
uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    return byte_zap(val, 0x0F << byte_ofs);
}
282

    
283
/* INSLL: position the low longword of 'val' at the byte offset in 'mask'
 * and clear the remaining bytes.  */
uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    val <<= byte_ofs * 8;
    return byte_zap(val, ~(0x0F << byte_ofs));
}
288

    
289
/* ZAP: clear the bytes of 'val' selected by the low 8 bits of 'mask'.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    uint8_t sel = mask;

    return byte_zap(val, sel);
}
293

    
294
/* ZAPNOT: keep only the bytes of 'val' selected by the low 8 bits of
 * 'mask'.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    uint8_t keep = mask;

    return byte_zap(val, (uint8_t)~keep);
}
298

    
299
/* MSKQL: clear the low part of the quadword starting at the byte offset in
 * 'mask' (byte_zap truncates the shifted 0xFF to its low 8 bits).  */
uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    return byte_zap(val, 0xFF << byte_ofs);
}
303

    
304
/* INSQL: shift 'val' to the byte offset in 'mask' and keep only the bytes
 * the quadword occupies below the 64-bit boundary.  */
uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    val <<= byte_ofs * 8;
    return byte_zap(val, ~(0xFF << byte_ofs));
}
309

    
310
/* MSKWH: clear the bytes of the word that spill past the quadword boundary
 * for the byte offset in 'mask'.  */
uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    return byte_zap(val, (0x03 << byte_ofs) >> 8);
}
314

    
315
/* INSWH: produce the high part of a word inserted at the byte offset in
 * 'mask' (the bytes that cross the quadword boundary).
 * Fix: for byte offset 0 the original computed val >> 64, which is
 * undefined behavior in C.  The byte_zap mask clears every byte in that
 * case anyway, so the defined result is 0 — make it explicit.  */
uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    if (byte_ofs == 0) {
        /* Nothing crosses the boundary; avoid the UB shift by 64.  */
        val = 0;
    } else {
        val >>= 64 - (byte_ofs * 8);
    }
    return byte_zap(val, ~((0x03 << byte_ofs) >> 8));
}
320

    
321
/* MSKLH: clear the bytes of the longword that spill past the quadword
 * boundary for the byte offset in 'mask'.  */
uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    return byte_zap(val, (0x0F << byte_ofs) >> 8);
}
325

    
326
/* INSLH: produce the high part of a longword inserted at the byte offset
 * in 'mask'.
 * Fix: for byte offset 0 the original computed val >> 64 — undefined
 * behavior; the byte_zap mask clears every byte then, so return 0
 * explicitly.  */
uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    if (byte_ofs == 0) {
        /* Nothing crosses the boundary; avoid the UB shift by 64.  */
        val = 0;
    } else {
        val >>= 64 - (byte_ofs * 8);
    }
    return byte_zap(val, ~((0x0F << byte_ofs) >> 8));
}
331

    
332
/* MSKQH: clear the bytes of the quadword that spill past the 64-bit
 * boundary for the byte offset in 'mask'.  */
uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    return byte_zap(val, (0xFF << byte_ofs) >> 8);
}
336

    
337
/* INSQH: produce the high part of a quadword inserted at the byte offset
 * in 'mask'.
 * Fix: for byte offset 0 the original computed val >> 64 — undefined
 * behavior; the byte_zap mask clears every byte then, so return 0
 * explicitly.  */
uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    if (byte_ofs == 0) {
        /* Nothing crosses the boundary; avoid the UB shift by 64.  */
        val = 0;
    } else {
        val >>= 64 - (byte_ofs * 8);
    }
    return byte_zap(val, ~((0xFF << byte_ofs) >> 8));
}
342

    
343
void helper_cmpbge (void)
344
{
345
    uint8_t opa, opb, res;
346
    int i;
347

    
348
    res = 0;
349
    for (i = 0; i < 7; i++) {
350
        opa = T0 >> (i * 8);
351
        opb = T1 >> (i * 8);
352
        if (opa >= opb)
353
            res |= 1 << i;
354
    }
355
    T0 = res;
356
}
357

    
358
/* FCMOV helper: if the predicate value in FT0 is non-zero, copy FT1 into
 * floating-point register 'freg'.  */
void helper_cmov_fir (int freg)
{
    if (FT0 != 0)
        env->fir[freg] = FT1;
}
363

    
364
/* SQRTS: single-precision square root of FT0 via softfloat.  */
void helper_sqrts (void)
{
    FT0 = float32_sqrt(FT0, &FP_STATUS);
}
368

    
369
void helper_cpys (void)
370
{
371
    union {
372
        double d;
373
        uint64_t i;
374
    } p, q, r;
375

    
376
    p.d = FT0;
377
    q.d = FT1;
378
    r.i = p.i & 0x8000000000000000ULL;
379
    r.i |= q.i & ~0x8000000000000000ULL;
380
    FT0 = r.d;
381
}
382

    
383
void helper_cpysn (void)
384
{
385
    union {
386
        double d;
387
        uint64_t i;
388
    } p, q, r;
389

    
390
    p.d = FT0;
391
    q.d = FT1;
392
    r.i = (~p.i) & 0x8000000000000000ULL;
393
    r.i |= q.i & ~0x8000000000000000ULL;
394
    FT0 = r.d;
395
}
396

    
397
void helper_cpyse (void)
398
{
399
    union {
400
        double d;
401
        uint64_t i;
402
    } p, q, r;
403

    
404
    p.d = FT0;
405
    q.d = FT1;
406
    r.i = p.i & 0xFFF0000000000000ULL;
407
    r.i |= q.i & ~0xFFF0000000000000ULL;
408
    FT0 = r.d;
409
}
410

    
411
/* ITOFS: reinterpret FT0's 64-bit pattern as an integer and convert it to
 * IEEE single precision via softfloat.  */
void helper_itofs (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float32(p.i, &FP_STATUS);
}
421

    
422
/* FTOIS: convert the single-precision value in FT0 to a 64-bit integer and
 * leave the integer's bit pattern in FT0.  */
void helper_ftois (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float32_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}
432

    
433
/* SQRTT: double-precision square root of FT0 via softfloat.  */
void helper_sqrtt (void)
{
    FT0 = float64_sqrt(FT0, &FP_STATUS);
}
437

    
438
void helper_cmptun (void)
439
{
440
    union {
441
        double d;
442
        uint64_t i;
443
    } p;
444

    
445
    p.i = 0;
446
    if (float64_is_nan(FT0) || float64_is_nan(FT1))
447
        p.i = 0x4000000000000000ULL;
448
    FT0 = p.d;
449
}
450

    
451
void helper_cmpteq (void)
452
{
453
    union {
454
        double d;
455
        uint64_t i;
456
    } p;
457

    
458
    p.i = 0;
459
    if (float64_eq(FT0, FT1, &FP_STATUS))
460
        p.i = 0x4000000000000000ULL;
461
    FT0 = p.d;
462
}
463

    
464
void helper_cmptle (void)
465
{
466
    union {
467
        double d;
468
        uint64_t i;
469
    } p;
470

    
471
    p.i = 0;
472
    if (float64_le(FT0, FT1, &FP_STATUS))
473
        p.i = 0x4000000000000000ULL;
474
    FT0 = p.d;
475
}
476

    
477
void helper_cmptlt (void)
478
{
479
    union {
480
        double d;
481
        uint64_t i;
482
    } p;
483

    
484
    p.i = 0;
485
    if (float64_lt(FT0, FT1, &FP_STATUS))
486
        p.i = 0x4000000000000000ULL;
487
    FT0 = p.d;
488
}
489

    
490
/* ITOFT: reinterpret FT0's 64-bit pattern as an integer and convert it to
 * double precision via softfloat.  */
void helper_itoft (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float64(p.i, &FP_STATUS);
}
500

    
501
/* FTOIT: convert the double in FT0 to a 64-bit integer and leave the
 * integer's bit pattern in FT0.  */
void helper_ftoit (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float64_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}
511

    
512
/* Check whether the bit pattern in 'ff' is a valid VAX F-float operand: a
 * biased exponent of 0 is only legal as a clean zero; with the sign set or
 * a non-zero mantissa it is a reserved operand / dirty zero.  */
static always_inline int vaxf_is_valid (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;

    return !(exp == 0 && ((p.i & 0x80000000) || mant != 0));
}
530

    
531
/* Convert a VAX F-float (already in host float layout) to an IEEE single
 * by compensating the exponent bias difference of 2.  */
static always_inline float vaxf_to_ieee32 (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;

    p.f = ff;
    if (((p.i >> 23) & 0xFF) < 3) {
        /* Exponent too small to survive the bias adjustment: flush to 0. */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}
550

    
551
/* Convert an IEEE single to VAX F-float: NaN/infinity and overflowing
 * exponents map to a dirty-zero pattern (i = 1), denormals are scaled up,
 * and normal numbers get the bias difference applied by scaling by 4.  */
static always_inline float ieee32_to_vaxf (float fi)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = fi;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 255) {
        /* NaN or infinity */
        p.i = 1;
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            p.i = 1;
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}
584

    
585
void helper_addf (void)
586
{
587
    float ft0, ft1, ft2;
588

    
589
    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
590
        /* XXX: TODO */
591
    }
592
    ft0 = vaxf_to_ieee32(FT0);
593
    ft1 = vaxf_to_ieee32(FT1);
594
    ft2 = float32_add(ft0, ft1, &FP_STATUS);
595
    FT0 = ieee32_to_vaxf(ft2);
596
}
597

    
598
void helper_subf (void)
599
{
600
    float ft0, ft1, ft2;
601

    
602
    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
603
        /* XXX: TODO */
604
    }
605
    ft0 = vaxf_to_ieee32(FT0);
606
    ft1 = vaxf_to_ieee32(FT1);
607
    ft2 = float32_sub(ft0, ft1, &FP_STATUS);
608
    FT0 = ieee32_to_vaxf(ft2);
609
}
610

    
611
void helper_mulf (void)
612
{
613
    float ft0, ft1, ft2;
614

    
615
    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
616
        /* XXX: TODO */
617
    }
618
    ft0 = vaxf_to_ieee32(FT0);
619
    ft1 = vaxf_to_ieee32(FT1);
620
    ft2 = float32_mul(ft0, ft1, &FP_STATUS);
621
    FT0 = ieee32_to_vaxf(ft2);
622
}
623

    
624
void helper_divf (void)
625
{
626
    float ft0, ft1, ft2;
627

    
628
    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
629
        /* XXX: TODO */
630
    }
631
    ft0 = vaxf_to_ieee32(FT0);
632
    ft1 = vaxf_to_ieee32(FT1);
633
    ft2 = float32_div(ft0, ft1, &FP_STATUS);
634
    FT0 = ieee32_to_vaxf(ft2);
635
}
636

    
637
/* SQRTF: VAX F-float square root via IEEE single arithmetic.
 * NOTE(review): FT1 is validity-checked although only FT0 is used —
 * presumably copied from the two-operand helpers; confirm.  */
void helper_sqrtf (void)
{
    float ft0, ft1;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = float32_sqrt(ft0, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft1);
}
648

    
649
/* ITOFF: integer to VAX F-float transfer; not implemented yet.  */
void helper_itoff (void)
{
    /* XXX: TODO */
}
653

    
654
/* Check whether the bit pattern in 'ff' is a valid VAX G-float operand: a
 * biased exponent of 0 is only legal as a clean zero; with the sign set or
 * a non-zero mantissa it is a reserved operand / dirty zero.  */
static always_inline int vaxg_is_valid (double ff)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint64_t exp, mant;

    p.f = ff;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;

    return !(exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0));
}
672

    
673
/* Convert a VAX G-float (already in host double layout) to an IEEE double
 * by compensating the exponent bias difference of 2.  */
static always_inline double vaxg_to_ieee64 (double fg)
{
    union {
        double f;
        uint64_t i;
    } p;

    p.f = fg;
    if (((p.i >> 52) & 0x7FF) < 3) {
        /* Exponent too small to survive the bias adjustment: flush to 0. */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}
692

    
693
/* Convert an IEEE double to VAX G-float: NaN/infinity and overflowing
 * exponents map to a dirty-zero pattern, denormals are scaled up, and
 * normal numbers get the bias difference applied by scaling by 4.
 * Fix: the NaN/infinity test compared exp against 255, the *single*-
 * precision maximum; a double's biased exponent field is 11 bits, so the
 * all-ones value is 2047.  With 255, perfectly valid doubles (exp == 255)
 * were wrongly turned into dirty zeros.  */
static always_inline double ieee64_to_vaxg (double fi)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint64_t mant;
    uint32_t exp;

    p.f = fi;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 2047) {
        /* NaN or infinity */
        p.i = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            p.i = 1; /* VAX dirty zero */
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}
727

    
728
void helper_addg (void)
729
{
730
    double ft0, ft1, ft2;
731

    
732
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
733
        /* XXX: TODO */
734
    }
735
    ft0 = vaxg_to_ieee64(FT0);
736
    ft1 = vaxg_to_ieee64(FT1);
737
    ft2 = float64_add(ft0, ft1, &FP_STATUS);
738
    FT0 = ieee64_to_vaxg(ft2);
739
}
740

    
741
void helper_subg (void)
742
{
743
    double ft0, ft1, ft2;
744

    
745
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
746
        /* XXX: TODO */
747
    }
748
    ft0 = vaxg_to_ieee64(FT0);
749
    ft1 = vaxg_to_ieee64(FT1);
750
    ft2 = float64_sub(ft0, ft1, &FP_STATUS);
751
    FT0 = ieee64_to_vaxg(ft2);
752
}
753

    
754
void helper_mulg (void)
755
{
756
    double ft0, ft1, ft2;
757

    
758
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
759
        /* XXX: TODO */
760
    }
761
    ft0 = vaxg_to_ieee64(FT0);
762
    ft1 = vaxg_to_ieee64(FT1);
763
    ft2 = float64_mul(ft0, ft1, &FP_STATUS);
764
    FT0 = ieee64_to_vaxg(ft2);
765
}
766

    
767
void helper_divg (void)
768
{
769
    double ft0, ft1, ft2;
770

    
771
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
772
        /* XXX: TODO */
773
    }
774
    ft0 = vaxg_to_ieee64(FT0);
775
    ft1 = vaxg_to_ieee64(FT1);
776
    ft2 = float64_div(ft0, ft1, &FP_STATUS);
777
    FT0 = ieee64_to_vaxg(ft2);
778
}
779

    
780
/* SQRTG: VAX G-float square root via IEEE double arithmetic.
 * NOTE(review): FT1 is validity-checked although only FT0 is used —
 * presumably copied from the two-operand helpers; confirm.  */
void helper_sqrtg (void)
{
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = float64_sqrt(ft0, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft1);
}
791

    
792
void helper_cmpgeq (void)
793
{
794
    union {
795
        double d;
796
        uint64_t u;
797
    } p;
798
    double ft0, ft1;
799

    
800
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
801
        /* XXX: TODO */
802
    }
803
    ft0 = vaxg_to_ieee64(FT0);
804
    ft1 = vaxg_to_ieee64(FT1);
805
    p.u = 0;
806
    if (float64_eq(ft0, ft1, &FP_STATUS))
807
        p.u = 0x4000000000000000ULL;
808
    FT0 = p.d;
809
}
810

    
811
void helper_cmpglt (void)
812
{
813
    union {
814
        double d;
815
        uint64_t u;
816
    } p;
817
    double ft0, ft1;
818

    
819
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
820
        /* XXX: TODO */
821
    }
822
    ft0 = vaxg_to_ieee64(FT0);
823
    ft1 = vaxg_to_ieee64(FT1);
824
    p.u = 0;
825
    if (float64_lt(ft0, ft1, &FP_STATUS))
826
        p.u = 0x4000000000000000ULL;
827
    FT0 = p.d;
828
}
829

    
830
void helper_cmpgle (void)
831
{
832
    union {
833
        double d;
834
        uint64_t u;
835
    } p;
836
    double ft0, ft1;
837

    
838
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
839
        /* XXX: TODO */
840
    }
841
    ft0 = vaxg_to_ieee64(FT0);
842
    ft1 = vaxg_to_ieee64(FT1);
843
    p.u = 0;
844
    if (float64_le(ft0, ft1, &FP_STATUS))
845
        p.u = 0x4000000000000000ULL;
846
    FT0 = p.d;
847
}
848

    
849
void helper_cvtqs (void)
850
{
851
    union {
852
        double d;
853
        uint64_t u;
854
    } p;
855

    
856
    p.d = FT0;
857
    FT0 = (float)p.u;
858
}
859

    
860
void helper_cvttq (void)
861
{
862
    union {
863
        double d;
864
        uint64_t u;
865
    } p;
866

    
867
    p.u = FT0;
868
    FT0 = p.d;
869
}
870

    
871
void helper_cvtqt (void)
872
{
873
    union {
874
        double d;
875
        uint64_t u;
876
    } p;
877

    
878
    p.d = FT0;
879
    FT0 = p.u;
880
}
881

    
882
void helper_cvtqf (void)
883
{
884
    union {
885
        double d;
886
        uint64_t u;
887
    } p;
888

    
889
    p.d = FT0;
890
    FT0 = ieee32_to_vaxf(p.u);
891
}
892

    
893
/* CVTGF: convert VAX G-float to VAX F-float by way of IEEE double.  */
void helper_cvtgf (void)
{
    double ft0;

    ft0 = vaxg_to_ieee64(FT0);
    FT0 = ieee32_to_vaxf(ft0);
}
900

    
901
/* CVTGD: VAX G-float to D-float; not implemented yet.  */
void helper_cvtgd (void)
{
    /* XXX: TODO */
}
905

    
906
void helper_cvtgq (void)
907
{
908
    union {
909
        double d;
910
        uint64_t u;
911
    } p;
912

    
913
    p.u = vaxg_to_ieee64(FT0);
914
    FT0 = p.d;
915
}
916

    
917
void helper_cvtqg (void)
918
{
919
    union {
920
        double d;
921
        uint64_t u;
922
    } p;
923

    
924
    p.d = FT0;
925
    FT0 = ieee64_to_vaxg(p.u);
926
}
927

    
928
/* CVTDG: VAX D-float to G-float; not implemented yet.  */
void helper_cvtdg (void)
{
    /* XXX: TODO */
}
932

    
933
/* CVTLQ: gather the longword stored in FP-register layout inside FT0's bit
 * pattern (low bits from <58:29>, high bits from <63:32>) and sign-extend
 * the resulting 32-bit value to a quadword.  */
void helper_cvtlq (void)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = (p.u >> 29) & 0x3FFFFFFF;
    q.u |= (p.u >> 32);
    q.u = (int64_t)((int32_t)q.u);
    FT0 = q.d;
}
946

    
947
/* Common body for CVTQL and its /V and /SV variants: scatter the low
 * longword of FT0's bit pattern into FP-register longword layout (bits
 * <31:30> to <63:62>, <30:0> to <58:29> — note bit 30 lands in both
 * fields).  When 'v' is set, raise an arithmetic overflow trap if the
 * quadword does not fit in a longword; 's' (software completion) is TODO.
 * NOTE(review): the '__' name prefix is reserved for the C implementation;
 * a rename (e.g. do_cvtql) would be safer — confirm with callers.  */
static always_inline void __helper_cvtql (int s, int v)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
    q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
    FT0 = q.d;
    if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
}
965

    
966
/* CVTQL: quadword to longword, no trapping.  */
void helper_cvtql (void)
{
    __helper_cvtql(0, 0);
}
970

    
971
/* CVTQL/V: quadword to longword, trapping on integer overflow.  */
void helper_cvtqlv (void)
{
    __helper_cvtql(0, 1);
}
975

    
976
/* CVTQL/SV: quadword to longword with overflow trap and software
 * completion.  */
void helper_cvtqlsv (void)
{
    __helper_cvtql(1, 1);
}
980

    
981
void helper_cmpfeq (void)
982
{
983
    if (float64_eq(FT0, FT1, &FP_STATUS))
984
        T0 = 1;
985
    else
986
        T0 = 0;
987
}
988

    
989
void helper_cmpfne (void)
990
{
991
    if (float64_eq(FT0, FT1, &FP_STATUS))
992
        T0 = 0;
993
    else
994
        T0 = 1;
995
}
996

    
997
void helper_cmpflt (void)
998
{
999
    if (float64_lt(FT0, FT1, &FP_STATUS))
1000
        T0 = 1;
1001
    else
1002
        T0 = 0;
1003
}
1004

    
1005
void helper_cmpfle (void)
1006
{
1007
    if (float64_lt(FT0, FT1, &FP_STATUS))
1008
        T0 = 1;
1009
    else
1010
        T0 = 0;
1011
}
1012

    
1013
void helper_cmpfgt (void)
1014
{
1015
    if (float64_le(FT0, FT1, &FP_STATUS))
1016
        T0 = 0;
1017
    else
1018
        T0 = 1;
1019
}
1020

    
1021
void helper_cmpfge (void)
1022
{
1023
    if (float64_lt(FT0, FT1, &FP_STATUS))
1024
        T0 = 0;
1025
    else
1026
        T0 = 1;
1027
}
1028

    
1029
#if !defined (CONFIG_USER_ONLY)
1030
/* MFPR: read internal processor register 'iprn' into T0.  On a non-zero
 * return from cpu_alpha_mfpr, T0 is left unchanged.  */
void helper_mfpr (int iprn)
{
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
}
1037

    
1038
/* MTPR: write T0 to internal processor register 'iprn'.  */
void helper_mtpr (int iprn)
{
    cpu_alpha_mtpr(env, iprn, T0, NULL);
}
1042
#endif
1043

    
1044
#if defined(HOST_SPARC) || defined(HOST_SPARC64)
1045
/* Zero FT0; compiled only on SPARC hosts (see the enclosing #if).  */
void helper_reset_FT0 (void)
{
    FT0 = 0;
}
1049

    
1050
/* Zero FT1; compiled only on SPARC hosts (see the enclosing #if).  */
void helper_reset_FT1 (void)
{
    FT1 = 0;
}
1054

    
1055
/* Zero FT2; compiled only on SPARC hosts (see the enclosing #if).  */
void helper_reset_FT2 (void)
{
    FT2 = 0;
}
1059
#endif
1060

    
1061
/*****************************************************************************/
1062
/* Softmmu support */
1063
#if !defined (CONFIG_USER_ONLY)
1064

    
1065
/* XXX: the two following helpers are pure hacks.
1066
 *      Hopefully, we emulate the PALcode, then we should never see
1067
 *      HW_LD / HW_ST instructions.
1068
 */
1069
/* Translate the virtual address in T0 for a *read* access using the
 * softmmu TLB, retrying after tlb_fill on a miss, and leave the
 * host-addend-adjusted address back in T0.  */
void helper_ld_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
1090

    
1091
/* Translate the virtual address in T0 for a *write* access using the
 * softmmu TLB, retrying after tlb_fill on a miss, and leave the
 * host-addend-adjusted address back in T0.  */
void helper_st_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
1112

    
1113
#define MMUSUFFIX _mmu
1114

    
1115
#define SHIFT 0
1116
#include "softmmu_template.h"
1117

    
1118
#define SHIFT 1
1119
#include "softmmu_template.h"
1120

    
1121
#define SHIFT 2
1122
#include "softmmu_template.h"
1123

    
1124
#define SHIFT 3
1125
#include "softmmu_template.h"
1126

    
1127
/* try to fill the TLB and return an exception if error. If retaddr is
1128
   NULL, it means that the function was called in C code (i.e. not
1129
   from generated code or from helper.c) */
1130
/* XXX: fix it to restore all registers */
1131
/* Handle a softmmu TLB miss for 'addr'.  On an MMU fault, restore the CPU
 * state from the translated-code return address (when available) and exit
 * to the main loop with the exception already recorded.  */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}
1159

    
1160
#endif