Revision 248c42f3 target-alpha/translate.c

--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
 FCMOV(cmpfle)
 FCMOV(cmpfgt)
 
+static inline uint64_t zapnot_mask(uint8_t lit)
+{
+    uint64_t mask = 0;
+    int i;
+
+    for (i = 0; i < 8; ++i) {
+        if ((lit >> i) & 1)
+            mask |= 0xffull << (i * 8);
+    }
+    return mask;
+}
+
 /* Implement zapnot with an immediate operand, which expands to some
    form of immediate AND.  This is a basic building block in the
    definition of many of the other byte manipulation instructions.  */
-static inline void gen_zapnoti(int ra, int rc, uint8_t lit)
+static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
 {
-    uint64_t mask;
-    int i;
-
     switch (lit) {
     case 0x00:
-        tcg_gen_movi_i64(cpu_ir[rc], 0);
+        tcg_gen_movi_i64(dest, 0);
         break;
     case 0x01:
-        tcg_gen_ext8u_i64(cpu_ir[rc], cpu_ir[ra]);
+        tcg_gen_ext8u_i64(dest, src);
         break;
     case 0x03:
-        tcg_gen_ext16u_i64(cpu_ir[rc], cpu_ir[ra]);
+        tcg_gen_ext16u_i64(dest, src);
         break;
     case 0x0f:
-        tcg_gen_ext32u_i64(cpu_ir[rc], cpu_ir[ra]);
+        tcg_gen_ext32u_i64(dest, src);
         break;
     case 0xff:
-        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]);
+        tcg_gen_mov_i64(dest, src);
         break;
     default:
-        for (mask = i = 0; i < 8; ++i) {
-            if ((lit >> i) & 1)
-                mask |= 0xffull << (i * 8);
-        }
-        tcg_gen_andi_i64 (cpu_ir[rc], cpu_ir[ra], mask);
+        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
     }
 }
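
For reference, the new zapnot_mask() helper expands each set bit of the 8-bit literal into a full 0xff byte lane; the special cases kept in gen_zapnoti are simply the literals whose masks TCG already has dedicated zero-extension ops for. A minimal host-side sketch of that mapping (the test harness is illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Same computation as the patch's zapnot_mask(): bit i of LIT selects
   byte lane i of the 64-bit mask.  */
static uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}

int main(void)
{
    assert(zapnot_mask(0x00) == 0);                      /* movi 0  */
    assert(zapnot_mask(0x01) == 0xffull);                /* ext8u   */
    assert(zapnot_mask(0x03) == 0xffffull);              /* ext16u  */
    assert(zapnot_mask(0x0f) == 0xffffffffull);          /* ext32u  */
    assert(zapnot_mask(0xff) == ~(uint64_t)0);           /* mov     */
    assert(zapnot_mask(0x05) == 0x0000000000ff00ffull);  /* scattered lanes */
    return 0;
}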
......
     else if (unlikely(ra == 31))
         tcg_gen_movi_i64(cpu_ir[rc], 0);
     else if (islit)
-        gen_zapnoti(ra, rc, lit);
+        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
     else
         gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
 }
......
     else if (unlikely(ra == 31))
         tcg_gen_movi_i64(cpu_ir[rc], 0);
     else if (islit)
-        gen_zapnoti(ra, rc, ~lit);
+        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
     else
         gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
 }
 
 
-/* EXTWH, EXTWH, EXTLH, EXTQH */
+/* EXTWH, EXTLH, EXTQH */
 static inline void gen_ext_h(int ra, int rb, int rc, int islit,
                              uint8_t lit, uint8_t byte_mask)
 {
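
Both callers above now pass cpu_ir registers straight into gen_zapnoti. The ZAP case hands in ~lit because ZAP clears the byte lanes whose mask bits are set while ZAPNOT keeps them, so one routine covers both. A small host-side model of the two literal forms under that reading (names and values are illustrative):

#include <assert.h>
#include <stdint.h>

/* Byte lane i of VA survives iff bit i of LIT is set (ZAPNOT)
   or clear (ZAP).  */
static uint64_t ref_zapnot(uint64_t va, uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return va & mask;
}

static uint64_t ref_zap(uint64_t va, uint8_t lit)
{
    /* What gen_zap now does: zapnot with the complemented literal.  */
    return ref_zapnot(va, ~lit);
}

int main(void)
{
    assert(ref_zapnot(0x1122334455667788ull, 0x0f) == 0x55667788ull);
    assert(ref_zap   (0x1122334455667788ull, 0x0f) == 0x1122334400000000ull);
    return 0;
}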
......
             tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
             tcg_temp_free(tmp1);
         }
-        gen_zapnoti(rc, rc, byte_mask);
+        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
     }
 }
 
-/* EXTBL, EXTWL, EXTWL, EXTLL, EXTQL */
+/* EXTBL, EXTWL, EXTLL, EXTQL */
 static inline void gen_ext_l(int ra, int rb, int rc, int islit,
                              uint8_t lit, uint8_t byte_mask)
 {
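
gen_ext_h and gen_ext_l (whose body follows in the next hunk) both end by zapping the shifted value down to the operand width, with byte_mask selecting 1, 2, 4 or 8 low bytes. A rough reference model of the extract-low family under that reading (ref_ext_l is a made-up name, not a function in the file):

#include <assert.h>
#include <stdint.h>

/* Shift VA right by the byte offset in VB<2:0>, then keep only the
   byte lanes named by byte_mask (0x01/0x03/0x0f/0xff for B/W/L/Q).  */
static uint64_t ref_ext_l(uint64_t va, uint64_t vb, uint8_t byte_mask)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((byte_mask >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return (va >> ((vb & 7) * 8)) & mask;
}

int main(void)
{
    /* EXTBL: pull byte 3 of the source down to byte 0.  */
    assert(ref_ext_l(0x1122334455667788ull, 3, 0x01) == 0x55);
    /* EXTWL: pull bytes 3:2 down to the low word.  */
    assert(ref_ext_l(0x1122334455667788ull, 2, 0x03) == 0x5566);
    return 0;
}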
......
             tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
             tcg_temp_free(tmp);
         }
-        gen_zapnoti(rc, rc, byte_mask);
+        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
+    }
+}
+
+/* INSBL, INSWL, INSLL, INSQL */
+static inline void gen_ins_l(int ra, int rb, int rc, int islit,
+                             uint8_t lit, uint8_t byte_mask)
+{
+    if (unlikely(rc == 31))
+        return;
+    else if (unlikely(ra == 31))
+        tcg_gen_movi_i64(cpu_ir[rc], 0);
+    else {
+        TCGv tmp = tcg_temp_new();
+
+        /* The instruction description has us left-shift the byte mask
+           the same number of byte slots as the data and apply the zap
+           at the end.  This is equivalent to simply performing the zap
+           first and shifting afterward.  */
+        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
+
+        if (islit) {
+            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
+        } else {
+            TCGv shift = tcg_temp_new();
+            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
+            tcg_gen_shli_i64(shift, shift, 3);
+            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
+            tcg_temp_free(shift);
+        }
+        tcg_temp_free(tmp);
     }
 }
 
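
The comment inside the new gen_ins_l relies on the fact that zapping before the shift gives the same result as shifting both the data and the byte mask and zapping afterwards, because a left shift distributes over AND. That identity is easy to check with plain host arithmetic (no TCG involved):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* (x & m) << s == (x << s) & (m << s) for byte-aligned shifts,
       which is the equivalence gen_ins_l's comment appeals to.  */
    uint64_t x = 0x1122334455667788ull;
    uint64_t masks[] = { 0xffull, 0xffffull, 0xffffffffull, ~(uint64_t)0 };
    unsigned i, s;

    for (i = 0; i < 4; ++i) {
        for (s = 0; s < 64; s += 8) {
            assert(((x & masks[i]) << s) == ((x << s) & (masks[i] << s)));
        }
    }
    return 0;
}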
......
 ARITH3(addqv)
 ARITH3(subqv)
 ARITH3(mskbl)
-ARITH3(insbl)
 ARITH3(mskwl)
-ARITH3(inswl)
 ARITH3(mskll)
-ARITH3(insll)
 ARITH3(mskql)
-ARITH3(insql)
 ARITH3(mskwh)
 ARITH3(inswh)
 ARITH3(msklh)
......
             break;
         case 0x0B:
             /* INSBL */
-            gen_insbl(ra, rb, rc, islit, lit);
+            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
             break;
         case 0x12:
             /* MSKWL */
......
             break;
         case 0x1B:
             /* INSWL */
-            gen_inswl(ra, rb, rc, islit, lit);
+            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
             break;
         case 0x22:
             /* MSKLL */
......
             break;
         case 0x2B:
             /* INSLL */
-            gen_insll(ra, rb, rc, islit, lit);
+            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
             break;
         case 0x30:
             /* ZAP */
......
             break;
         case 0x3B:
             /* INSQL */
-            gen_insql(ra, rb, rc, islit, lit);
+            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
             break;
         case 0x3C:
             /* SRA */
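
All four insert-low opcodes now funnel into gen_ins_l and differ only in the byte_mask that picks the operand width: 0x01 for INSBL, 0x03 for INSWL, 0x0f for INSLL, 0xff for INSQL. For the byte case that boils down to the following host-side model (ref_insbl is an illustrative name, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* INSBL as gen_ins_l(..., 0x01) computes it: zap VA down to its low
   byte, then shift it into the byte slot selected by VB<2:0>.  */
static uint64_t ref_insbl(uint64_t va, uint64_t vb)
{
    return (va & 0xffull) << ((vb & 7) * 8);
}

int main(void)
{
    /* Place the low byte of VA into byte slot 5.  */
    assert(ref_insbl(0x00000000000000aaull, 5) == 0x0000aa0000000000ull);
    return 0;
}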
