/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"

void helper_tb_flush (void)
{
    tb_flush(env);
}

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

uint64_t helper_amask (uint64_t arg)
{
    switch (env->implver) {
    case IMPLVER_2106x:
        /* EV4, EV45, LCA, LCA45 & EV5 */
        break;
    case IMPLVER_21164:
    case IMPLVER_21264:
    case IMPLVER_21364:
        arg &= ~env->amask;
        break;
    }
    return arg;
}
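
/* AMASK clears the bits of its operand that correspond to architecture
 * extensions this CPU implements, so any bit still set on return means
 * the feature is absent.  For the EV4/EV5 family (IMPLVER_2106x) the
 * operand is returned unchanged.
 */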

uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}

uint64_t helper_load_fpcr (void)
{
    uint64_t ret = 0;
#ifdef CONFIG_SOFTFLOAT
    ret |= env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        ret |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        ret |= 2ULL << 58;
        break;
    case float_round_down:
        ret |= 1ULL << 58;
        break;
    case float_round_up:
        ret |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
    return ret;
}

void helper_store_fpcr (uint64_t val)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((val >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}
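
/* The two helpers above map the FPCR dynamic rounding mode field,
 * FPCR<59:58>, onto softfloat rounding modes: 0 = chopped (toward
 * zero), 1 = toward minus infinity, 2 = normal (round to nearest
 * even), 3 = toward plus infinity.  When CONFIG_SOFTFLOAT is enabled,
 * the exception summary bits are mirrored from softfloat's accrued
 * exception flags.
 */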

spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
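
/* RS and RC return the previous value of the per-CPU interrupt flag
 * and then set (RS) or clear (RC) it; the spinlock keeps the
 * read-modify-write atomic with respect to other virtual CPUs.
 */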

uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
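
/* The overflow test above is the usual bit trick for signed addition:
 * (tmp ^ op2 ^ -1) has bit 63 set only when both operands have the
 * same sign, and (tmp ^ op1) has bit 63 set when the result's sign
 * differs from the first operand's; together that is exactly the
 * signed-overflow condition.  The 32-bit variants below apply the same
 * test to bit 31.
 */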

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return tl;
}
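
/* muls64() produces the full 128-bit signed product in th:tl.  The
 * unsigned comparison (th + 1) > 1 is true exactly when th is neither
 * 0 nor all-ones, i.e. when the high half is not a plain sign
 * extension, which is the overflow test the comment above describes.
 */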

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
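
/* byte_zap() expands the 8-bit selector into a byte-granular mask (bit
 * n of mskb selects byte lane n) and clears the selected bytes of op.
 * For example, byte_zap(x, 0x0F) clears the low four bytes of x, and
 * byte_zap(x, 0xFE) keeps only the least significant byte.
 */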

uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x01 << (mask & 7));
}

uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x01 << (mask & 7)));
}

uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x03 << (mask & 7));
}

uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x03 << (mask & 7)));
}

uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x0F << (mask & 7));
}

uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x0F << (mask & 7)));
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0xFF << (mask & 7));
}

uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0xFF << (mask & 7)));
}

uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}
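
/* The MSKxL/INSxL helpers above handle the part of a byte, word,
 * longword or quadword that falls inside the low aligned quadword of
 * an unaligned access: the width mask (0x01, 0x03, 0x0F or 0xFF) is
 * shifted left by the byte offset taken from the low three bits of the
 * address operand.  The xH variants handle the bytes that spill into
 * the next quadword, which is why the same mask is shifted right by 8
 * and the inserted value is shifted right by 64 - 8 * offset.
 */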

uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}
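
/* CMPBGE performs eight unsigned byte compares in parallel and returns
 * one result bit per byte lane.  Its classic use is locating a zero
 * byte: comparing zero against a quadword of string data sets a bit
 * for every byte that is 0x00.
 */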

/* Floating point helpers */

/* F floating (VAX) */
static always_inline uint64_t float32_to_f (float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static always_inline float32 f_to_float32 (uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}
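
/* The F-floating register image reuses the T-format bit layout: the
 * sign sits at bit 63, the 8-bit IEEE single exponent is widened into
 * the 11-bit field at <62:52>, and the 23-bit fraction is
 * left-justified at <51:29>.  The +2/-2 exponent adjustments account
 * for the bias difference between IEEE single and this register
 * representation.  Inputs that cannot be represented (NaN, infinity,
 * exponent overflow) are encoded as a VAX dirty zero, and register
 * images with a zero exponent field but a nonzero sign or fraction are
 * treated as reserved operands and raise OPCDEC when converted back.
 */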

uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r =  (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r =  ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x00003fff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}
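
/* The F-floating memory image keeps the sign, exponent and high
 * fraction bits in the low-order 16-bit word and the low fraction bits
 * in the high-order word, so moving between the memory and register
 * images is a matter of shuffling those bit fields; the final test in
 * helper_memory_to_f widens the 8-bit exponent into the 11-bit
 * register field in the same way as for S-floating values.
 */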

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}


/* G floating (VAX) */
static always_inline uint64_t float64_to_g (float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static always_inline float64 g_to_float64 (uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
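
/* The G-floating memory/register conversion implemented here is just a
 * reversal of the four 16-bit words of the value.  Since that
 * permutation is its own inverse, helper_g_to_memory and
 * helper_memory_to_g are identical.
 */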

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}


/* S floating (single) */
static always_inline uint64_t float32_to_s (float32 fa)
{
    CPU_FloatU a;
    uint64_t r;

    a.f = fa;

    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
        r |= 0x7ll << 59;
    return r;
}

static always_inline float32 s_to_float32 (uint64_t a)
{
    CPU_FloatU r;
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return r.f;
}
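
/* An S-floating value is kept in the register in T-format positions:
 * IEEE single bits <31:30> move to <63:62> and bits <29:0> to <58:29>.
 * For finite numbers whose exponent MSB is clear, bits <61:59> are
 * filled with ones so that the widened exponent keeps the same
 * unbiased value in the 11-bit T exponent field.
 */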

uint32_t helper_s_to_memory (uint64_t a)
{
    /* Memory format is the same as float32 */
    float32 fa = s_to_float32(a);
    return *(uint32_t*)(&fa);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    /* Memory format is the same as float32 */
    return float32_to_s(*(float32*)(&a));
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}


/* T floating (double) */
static always_inline float64 t_to_float64 (uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static always_inline uint64_t float64_to_t (float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}


/* Sign copy */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    return (a & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    return ((~a) & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    return (a & 0xFFF0000000000000ULL) | (b & ~0xFFF0000000000000ULL);
}
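
/* CPYS copies only the sign bit of the first operand onto the second,
 * CPYSN copies the complemented sign, and CPYSE copies the sign and
 * the 11-bit exponent field (the top twelve bits of the T format),
 * keeping the fraction of the second operand.
 */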


/* Comparisons */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpfeq (uint64_t a)
{
    return !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfne (uint64_t a)
{
    return (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpflt (uint64_t a)
{
    return (a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfle (uint64_t a)
{
    return (a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfge (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}
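
/* The floating comparisons return the canonical "true" value
 * 0x4000000000000000 (2.0 in T format) or zero, matching what the CMPT
 * and CMPG instructions write.  The cmpf* helpers compare an operand
 * against zero using only the sign bit and the magnitude bits, so they
 * never touch the softfloat status flags.
 */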


/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvttq (uint64_t a)
{
    float64 fa = t_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_cvtlq (uint64_t a)
{
    return (int64_t)((int32_t)((a >> 32) | ((a >> 29) & 0x3FFFFFFF)));
}

static always_inline uint64_t __helper_cvtql (uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
    return r;
}

uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}
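
/* CVTLQ/CVTQL convert between a quadword and the longword-in-FP-register
 * layout, which keeps the longword's two high bits at <63:62> and its
 * low 30 bits at <58:29>.  The v flag of __helper_cvtql requests an
 * arithmetic trap on overflow (CVTQL/V), and the s (software
 * completion) qualifier is still a TODO here.
 */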

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Hopefully, we emulate the PALcode, then we should never see
 *      HW_LD / HW_ST instructions.
 */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}

void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}

uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}
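
/* Load-locked/store-conditional is modelled with a single lock
 * address: the ld*_l helpers record the locked address in env->lock,
 * the st*_c helpers perform the store and return 0 only while that
 * address still matches (returning 1 otherwise), and env->lock is then
 * reset to 1 so that a conditional store without a preceding locked
 * load fails.
 */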

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
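
/* Each inclusion of softmmu_template.h instantiates the softmmu
 * load/store accessors for one access size; SHIFT is log2 of the size,
 * so the four inclusions cover byte, word, longword and quadword
 * accesses, with MMUSUFFIX selecting the _mmu name suffix.
 */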

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif