Statistics
| Branch: | Revision:

root / target-alpha / op_helper.c @ f18cd223

History | View | Annotate | Download (23.4 kB)

1
/*
2
 *  Alpha emulation cpu micro-operations helpers for qemu.
3
 *
4
 *  Copyright (c) 2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20

    
21
#include "exec.h"
22
#include "host-utils.h"
23
#include "softfloat.h"
24

    
25
#include "op_helper.h"
26

    
27
/* TBIA and friends: flush the emulated TLB.  The '1' also flushes
   entries for pages marked global. */
void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}
31

    
32
/* Forward declaration; implemented elsewhere in the target code. */
void cpu_dump_EA (target_ulong EA);

/* Debug helper: dump the effective address of a memory access. */
void helper_print_mem_EA (target_ulong EA)
{
    cpu_dump_EA(EA);
}
37

    
38
/*****************************************************************************/
39
/* Exceptions processing helpers */
40
/* Raise an exception: record the exception index and error code in the
   CPU state and unwind to the main execution loop.  Does not return. */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
46

    
47
uint64_t helper_amask (uint64_t arg)
48
{
49
    switch (env->implver) {
50
    case IMPLVER_2106x:
51
        /* EV4, EV45, LCA, LCA45 & EV5 */
52
        break;
53
    case IMPLVER_21164:
54
    case IMPLVER_21264:
55
    case IMPLVER_21364:
56
        arg &= ~env->amask;
57
        break;
58
    }
59
    return arg;
60
}
61

    
62
/* RPCC: read the processor cycle counter.  Not implemented yet;
   always returns 0. */
uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}
67

    
68
/* IMPLVER: report which Alpha implementation family is emulated. */
uint64_t helper_load_implver (void)
{
    return env->implver;
}
72

    
73
uint64_t helper_load_fpcr (void)
74
{
75
    uint64_t ret = 0;
76
#ifdef CONFIG_SOFTFLOAT
77
    ret |= env->fp_status.float_exception_flags << 52;
78
    if (env->fp_status.float_exception_flags)
79
        ret |= 1ULL << 63;
80
    env->ipr[IPR_EXC_SUM] &= ~0x3E:
81
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
82
#endif
83
    switch (env->fp_status.float_rounding_mode) {
84
    case float_round_nearest_even:
85
        ret |= 2ULL << 58;
86
        break;
87
    case float_round_down:
88
        ret |= 1ULL << 58;
89
        break;
90
    case float_round_up:
91
        ret |= 3ULL << 58;
92
        break;
93
    case float_round_to_zero:
94
        break;
95
    }
96
    return ret;
97
}
98

    
99
void helper_store_fpcr (uint64_t val)
100
{
101
#ifdef CONFIG_SOFTFLOAT
102
    set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS);
103
#endif
104
    switch ((val >> 58) & 3) {
105
    case 0:
106
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
107
        break;
108
    case 1:
109
        set_float_rounding_mode(float_round_down, &FP_STATUS);
110
        break;
111
    case 2:
112
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
113
        break;
114
    case 3:
115
        set_float_rounding_mode(float_round_up, &FP_STATUS);
116
        break;
117
    }
118
}
119

    
120
/* Lock serializing access to the per-CPU interrupt flag used by the
   RS/RC (read-and-set / read-and-clear) instructions. */
spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

/* RS: atomically read the interrupt flag and set it to 1; returns the
   previous value. */
uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

/* RC: atomically read the interrupt flag and clear it; returns the
   previous value. */
uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
145

    
146
/* ADDQ/V: 64-bit add, trapping on signed overflow.  Overflow occurred
   iff the operands share a sign and the result's sign differs; the
   xor/and expression isolates that condition in bit 63. */
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
155

    
156
/* ADDL/V: 32-bit add (result truncated to a longword), trapping on
   signed 32-bit overflow detected via the sign of bit 31. */
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
165

    
166
/* SUBQ/V: 64-bit subtract, trapping on signed overflow (operands of
   differing sign whose result's sign matches the subtrahend). */
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 -= op2;
    if (unlikely(((~tmp) ^ op1 ^ (-1ULL)) & ((~tmp) ^ op2) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
175

    
176
/* SUBL/V: 32-bit subtract (result truncated to a longword), trapping on
   signed 32-bit overflow. */
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 - op2);
    if (unlikely(((~tmp) ^ op1 ^ (-1UL)) & ((~tmp) ^ op2) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
185

    
186
/* MULL/V: 32-bit multiply, trapping if the product does not fit in a
   signed longword.  NOTE(review): assumes the operands are canonical
   sign-extended 32-bit values so the signed 64-bit product cannot
   overflow - TODO confirm callers guarantee this. */
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}
195

    
196
/* MULQ/V: 64-bit multiply, trapping if the signed 128-bit product does
   not fit in 64 bits. */
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return tl;
}
207

    
208
/* UMULH: high 64 bits of the unsigned 128-bit product. */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}
215

    
216
/* CTPOP: population count (number of set bits). */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

/* CTLZ: count leading zero bits. */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

/* CTTZ: count trailing zero bits. */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
230

    
231
/* Clear every byte of op whose corresponding bit in mskb is set
   (bit i of mskb selects byte i). */
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if ((mskb >> i) & 1)
            mask |= 0xFFull << (i * 8);
    }
    return op & ~mask;
}
247

    
248
/* MSKBL: clear the byte of val selected by the low 3 bits of mask. */
uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x01 << (mask & 7));
}

/* INSBL: move the low byte of val to byte position (mask & 7) and
   clear every other byte. */
uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x01 << (mask & 7)));
}

/* MSKWL: clear the word (2 bytes) starting at byte (mask & 7). */
uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x03 << (mask & 7));
}

/* INSWL: position the low word of val at byte (mask & 7), clearing the
   rest. */
uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x03 << (mask & 7)));
}

/* MSKLL: clear the longword (4 bytes) starting at byte (mask & 7). */
uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x0F << (mask & 7));
}

/* INSLL: position the low longword of val at byte (mask & 7), clearing
   the rest. */
uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x0F << (mask & 7)));
}
280

    
281
/* ZAP: clear the bytes of val selected by the low 8 bits of mask. */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: keep only the bytes of val selected by mask. */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
290

    
291
/* MSKQL: clear the low part of a quadword spanning from byte
   (mask & 7) upward (byte_zap truncates the mask to 8 bits). */
uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0xFF << (mask & 7));
}

/* INSQL: shift val to byte position (mask & 7) and clear the bytes the
   shifted value does not cover. */
uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0xFF << (mask & 7)));
}
301

    
302
/* MSKWH: clear the bytes of a word that spill into the high quadword
   of an unaligned access ( (0x03 << n) >> 8 keeps only the carry-out
   bytes). */
uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

/* INSWH: high part of inserting a word at byte offset (mask & 7).
   When the offset is 0 nothing crosses into the high quadword; the
   original code shifted by 64 in that case, which is undefined
   behavior in C, so guard it explicitly (the result is 0 either way
   because byte_zap then masks every byte). */
uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    int sh = (mask & 7) * 8;
    val = sh ? val >> (64 - sh) : 0;
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

/* MSKLH: clear the high-quadword spill bytes of a longword. */
uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

/* INSLH: high part of inserting a longword; same shift-by-64 guard as
   helper_inswh. */
uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    int sh = (mask & 7) * 8;
    val = sh ? val >> (64 - sh) : 0;
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

/* MSKQH: clear the high-quadword spill bytes of a quadword. */
uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

/* INSQH: high part of inserting a quadword; same shift-by-64 guard as
   helper_inswh. */
uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    int sh = (mask & 7) * 8;
    val = sh ? val >> (64 - sh) : 0;
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}
334

    
335
/* CMPBGE: compare all eight bytes of op1 and op2 as unsigned values;
   bit i of the result is set iff byte i of op1 >= byte i of op2.
   The loop must run i = 0..7: the original stopped at i < 7 and never
   compared the most significant byte. */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}
349

    
350
/* Floating point helpers */
351

    
352
/* F floating (VAX) */
353
/* Convert an IEEE float32 into the 64-bit register image of a VAX F
   float.  VAX F has no NaN/infinity, so those inputs (and exponent
   overflow) are mapped to a "dirty zero" encoding (r = 1).
   NOTE(review): the pointer pun assumes float32 is bit-compatible with
   uint32_t - true for softfloat-native builds, verify for softfloat
   struct builds. */
static always_inline uint64_t float32_to_f (float32 fa)
{
    uint32_t a;
    uint64_t r, exp, mant, sig;

    a = *(uint32_t*)(&fa);
    sig = ((uint64_t)a & 0x80000000) << 32;        /* sign to bit 63 */
    exp = (a >> 23) & 0xff;                        /* IEEE exponent */
    mant = ((uint64_t)a & 0x007fffff) << 29;       /* mantissa to <51:29> */

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Rebias: VAX F exponent = IEEE exponent + 2 */
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}
385

    
386
/* Convert the 64-bit register image of a VAX F float into an IEEE
   float32.  Raises OPCDEC on reserved operands (dirty zeros).
   Fixed: the original computed r but returned the reinterpreted input
   a, discarding the whole conversion - return must come from r. */
static always_inline float32 f_to_float32 (uint64_t a)
{
    uint32_t r, exp, mant_sig;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r = 0;
    } else {
        /* Rebias: IEEE exponent = VAX F exponent - 2 */
        r = ((exp - 2) << 23) | mant_sig;
    }

    return *(float32*)(&r);
}
407

    
408
/* Shuffle a VAX F register image back into its 32-bit memory layout
   (VAX stores the words of an F float swapped relative to the
   register representation). */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t frac = (uint32_t)((a & 0x00001fffe0000000ull) >> 13);
    uint32_t mid  = (uint32_t)((a & 0x07ffe00000000000ull) >> 45);
    uint32_t top  = (uint32_t)((a & 0xc000000000000000ull) >> 48);

    return frac | mid | top;
}
416

    
417
/* Expand a 32-bit VAX F memory image into its 64-bit register layout,
   filling exponent bits <61:59> when the exponent's top bit is clear. */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t top      = (uint64_t)(a & 0x0000c000u) << 48;
    uint64_t low      = (uint64_t)(a & 0x003fffffu) << 45;
    uint64_t high_hw  = (uint64_t)(a & 0xffff0000u) << 13;
    uint64_t exp_fill = (a & 0x00004000u) ? 0 : (0x7ull << 59);

    return top | low | high_hw | exp_fill;
}
427

    
428
/* ADDF: VAX F add - convert both register images to float32, add with
   the softfloat status, convert back. */
uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* SUBF: VAX F subtract. */
uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* MULF: VAX F multiply. */
uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* DIVF: VAX F divide. */
uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* SQRTF: VAX F square root. */
uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}
476

    
477

    
478
/* G floating (VAX) */
479
/* Convert an IEEE float64 into the register image of a VAX G float.
   Like F format, NaN/infinity and exponent overflow have no G
   representation and are mapped to a "dirty zero" (r = 1).
   NOTE(review): the pointer pun assumes float64 is bit-compatible with
   uint64_t - verify for softfloat struct builds. */
static always_inline uint64_t float64_to_g (float64 fa)
{
    uint64_t a, r, exp, mant, sig;

    a = *(uint64_t*)(&fa);
    sig = a & 0x8000000000000000ull;       /* sign bit */
    exp = (a >> 52) & 0x7ff;               /* IEEE exponent */
    mant = a & 0x000fffffffffffffull;      /* mantissa */

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Rebias: VAX G exponent = IEEE exponent + 2 */
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}
510

    
511
/* Convert the register image of a VAX G float into an IEEE float64.
   Raises OPCDEC on reserved operands (dirty zeros).
   Fixed: the original computed r but returned the reinterpreted input
   a, discarding the conversion - return must come from r. */
static always_inline float64 g_to_float64 (uint64_t a)
{
    uint64_t r, exp, mant_sig;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r = 0;
    } else {
        /* Rebias: IEEE exponent = VAX G exponent - 2 */
        r = ((exp - 2) << 52) | mant_sig;
    }

    return *(float64*)(&r);
}
532

    
533
/* VAX G register and memory layouts differ only in the order of the
   four 16-bit words; swap them. */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t w0 = (a >>  0) & 0xffffull;
    uint64_t w1 = (a >> 16) & 0xffffull;
    uint64_t w2 = (a >> 32) & 0xffffull;
    uint64_t w3 = (a >> 48) & 0xffffull;

    return (w0 << 48) | (w1 << 32) | (w2 << 16) | w3;
}
542

    
543
/* Inverse of helper_g_to_memory; the word swap is self-inverse. */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t w0 = (a >>  0) & 0xffffull;
    uint64_t w1 = (a >> 16) & 0xffffull;
    uint64_t w2 = (a >> 32) & 0xffffull;
    uint64_t w3 = (a >> 48) & 0xffffull;

    return (w0 << 48) | (w1 << 32) | (w2 << 16) | w3;
}
552

    
553
/* ADDG: VAX G add - convert both register images to float64, operate
   with the softfloat status, convert back. */
uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* SUBG: VAX G subtract. */
uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* MULG: VAX G multiply. */
uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* DIVG: VAX G divide. */
uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* SQRTG: VAX G square root. */
uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}
601

    
602

    
603
/* S floating (single) */
604
/* S floating (single) */
/* Build the 64-bit register image of an IEEE S float: sign+exponent
   top bits to <63:62>, the rest shifted to <58:29>, with exponent bits
   <61:59> filled when the value is finite and the exponent's high bit
   is clear. */
static always_inline uint64_t float32_to_s (float32 fa)
{
    uint32_t a;
    uint64_t r;

    a = *(uint32_t*)(&fa);

    r = (((uint64_t)(a & 0xc0000000)) << 32) | (((uint64_t)(a & 0x3fffffff)) << 29);
    if (((a & 0x7f800000) != 0x7f800000) && (!(a & 0x40000000)))
        r |= 0x7ll << 59;
    return r;
}

/* Collapse an S register image back to an IEEE float32. */
static always_inline float32 s_to_float32 (uint64_t a)
{
    uint32_t r = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return *(float32*)(&r);
}

/* STS: register image -> 32-bit memory image. */
uint32_t helper_s_to_memory (uint64_t a)
{
    /* Memory format is the same as float32 */
    float32 fa = s_to_float32(a);
    return *(uint32_t*)(&fa);
}

/* LDS: 32-bit memory image -> register image. */
uint64_t helper_memory_to_s (uint32_t a)
{
    /* Memory format is the same as float32 */
    return float32_to_s(*(float32*)(&a));
}
635

    
636
/* ADDS: IEEE single add on S register images. */
uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* SUBS: IEEE single subtract. */
uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* MULS: IEEE single multiply. */
uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* DIVS: IEEE single divide. */
uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* SQRTS: IEEE single square root. */
uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}
684

    
685

    
686
/* T floating (double) */
687
/* T floating (double) */
/* T register format is identical to IEEE float64; reinterpret only. */
static always_inline float64 t_to_float64 (uint64_t a)
{
    /* Memory format is the same as float64 */
    return *(float64*)(&a);
}

/* Inverse reinterpretation.  NOTE(review): 'uint64' (not uint64_t)
   is presumably softfloat's own typedef - confirm it is in scope. */
static always_inline uint64_t float64_to_t (float64 fa)
{
    /* Memory format is the same as float64 */
    return *(uint64*)(&fa);
}
698

    
699
/* ADDT: IEEE double add. */
uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* SUBT: IEEE double subtract. */
uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* MULT: IEEE double multiply. */
uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* DIVT: IEEE double divide. */
uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* SQRTT: IEEE double square root. */
uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}
747

    
748

    
749
/* Sign copy */
750
/* Sign copy */
/* CPYS: result takes the sign bit of a, everything else from b. */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    const uint64_t sign = 1ULL << 63;
    return (a & sign) | (b & ~sign);
}

/* CPYSN: like CPYS but with the sign of a negated. */
uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    const uint64_t sign = 1ULL << 63;
    return (~a & sign) | (b & ~sign);
}

/* CPYSE: copy sign AND exponent field <63:52> from a, fraction from b. */
uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    const uint64_t sign_exp = 0xFFF0000000000000ULL;
    return (a & sign_exp) | (b & ~sign_exp);
}
764

    
765

    
766
/* Comparisons */
767
/* Comparisons */
/* All comparison helpers return 0x4000000000000000 (the FP-register
   encoding of "true", i.e. 2.0) or 0. */

/* CMPTUN: true iff either T operand is a NaN (unordered). */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTEQ: T equality. */
uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLE: T less-or-equal. */
uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLT: T less-than. */
uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGEQ: G equality (operands converted from G register format). */
uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLE: G less-or-equal. */
uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLT: G less-than. */
uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
857

    
858
/* Sign/magnitude tests on the raw FP register image: shifting out the
   sign bit ((a << 1) == 0) tests for +/-0, (a >> 63) tests the sign. */

/* True iff a is +0 or -0. */
uint64_t helper_cmpfeq (uint64_t a)
{
    return (a << 1) == 0;
}

/* Returns the magnitude bits (non-zero iff a != +/-0). */
uint64_t helper_cmpfne (uint64_t a)
{
    return (a << 1) >> 1;
}

/* True iff a is strictly negative (sign set and magnitude non-zero). */
uint64_t helper_cmpflt (uint64_t a)
{
    return (a >> 63) && ((a << 1) != 0);
}

/* True iff a is negative or zero. */
uint64_t helper_cmpfle (uint64_t a)
{
    return (a >> 63) || ((a << 1) == 0);
}

/* True iff a is strictly positive (sign clear and magnitude non-zero). */
uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a >> 63) && ((a << 1) != 0);
}

/* True iff a is positive or zero. */
uint64_t helper_cmpfge (uint64_t a)
{
    return !(a >> 63) || ((a << 1) == 0);
}
887

    
888

    
889
/* Floating point format conversion */
890
/* Floating point format conversion */
/* CVTTS: T (double) -> S (single). */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

/* CVTST: S (single) -> T (double). */
uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQS: signed quadword -> S. */
uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* CVTTQ: T -> signed quadword, truncating toward zero. */
uint64_t helper_cvttq (uint64_t a)
{
    float64 fa = t_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

/* CVTQT: signed quadword -> T. */
uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQF: signed quadword -> VAX F. */
uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGF: VAX G -> VAX F (via IEEE double and single). */
uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGQ: VAX G -> signed quadword, truncating toward zero. */
uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

/* CVTQG: signed quadword -> VAX G. */
uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}
956

    
957
/* CVTLQ: extract the longword stored in FP-register format - sign and
   bit 30 from <63:62>, low 30 bits from <58:29> - and sign-extend.
   The high part must be masked to 0xC0000000: the original OR'ed the
   unmasked (a >> 32), letting stray bits <61:32> of a non-canonical
   input leak into the low 30 bits of the result. */
uint64_t helper_cvtlq (uint64_t a)
{
    uint32_t hi = (a >> 32) & 0xC0000000;
    uint32_t lo = (a >> 29) & 0x3FFFFFFF;
    return (int64_t)((int32_t)(hi | lo));
}
961

    
962
/* Common CVTQL body: pack the low longword of a into FP-register
   format (sign/bit30 to <63:62>, low 31 bits to <59:29>).
   v: trap on overflow if the quadword does not fit in a longword.
   s: software completion - not implemented yet. */
static always_inline uint64_t __helper_cvtql (uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
    return r;
}
977

    
978
/* CVTQL: quadword -> longword, no traps. */
uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

/* CVTQL/V: quadword -> longword, trapping on overflow. */
uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

/* CVTQL/SV: quadword -> longword with software completion and
   overflow trapping. */
uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}
992

    
993
#if !defined (CONFIG_USER_ONLY)
/* MFPR: read internal processor register iprn into T0.  T0 is left
   unchanged if the read fails. */
void helper_mfpr (int iprn)
{
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
}

/* MTPR: write T0 to internal processor register iprn. */
void helper_mtpr (int iprn)
{
    cpu_alpha_mtpr(env, iprn, T0, NULL);
}
#endif
1007

    
1008
/*****************************************************************************/
1009
/* Softmmu support */
1010
#if !defined (CONFIG_USER_ONLY)
1011

    
1012
/* XXX: the two following helpers are pure hacks.
1013
 *      Hopefully, we emulate the PALcode, then we should never see
1014
 *      HW_LD / HW_ST instructions.
1015
 */
1016
/* Translate the virtual address in T0 to a physical address for a
   load, using the softmmu TLB.  On a miss the TLB is filled (which may
   raise a fault) and the lookup is retried.  Result replaces T0. */
void helper_ld_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB hit: add the per-entry offset to get the host address. */
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
1037

    
1038
/* Same as helper_ld_phys_to_virt, but for a store: consults the
   write entry of the TLB and fills with is_write = 1 on a miss. */
void helper_st_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB hit: add the per-entry offset to get the host address. */
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
1059

    
1060
#define MMUSUFFIX _mmu
1061

    
1062
#define SHIFT 0
1063
#include "softmmu_template.h"
1064

    
1065
#define SHIFT 1
1066
#include "softmmu_template.h"
1067

    
1068
#define SHIFT 2
1069
#include "softmmu_template.h"
1070

    
1071
#define SHIFT 3
1072
#include "softmmu_template.h"
1073

    
1074
/* try to fill the TLB and return an exception if error. If retaddr is
1075
   NULL, it means that the function was called in C code (i.e. not
1076
   from generated code or from helper.c) */
1077
/* XXX: fix it to restore all registers */
1078
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}
1106

    
1107
#endif