Revision 8d7b0fbb

b/target-i386/helper.c
553 553
        return 0xffff;
554 554
}
555 555

  
556
#ifdef TARGET_X86_64
/*
 * Store a new stack-pointer value, respecting the active stack width.
 *
 * sp_mask selects how much of ESP is replaced:
 *   0xffff       - 16-bit stack: only the low 16 bits of ESP change.
 *   0xffffffffLL - 32-bit stack: the value is truncated to 32 bits and
 *                  assigned directly, so any upper bits of the (wider)
 *                  stack-pointer register are zeroed by the assignment.
 *   anything else - full-width update: assign val unchanged.
 *
 * Wrapped in do { } while (0) so the macro behaves as one statement.
 */
#define SET_ESP(val, sp_mask)                                   \
do {                                                            \
    if ((sp_mask) == 0xffff) {                                  \
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);               \
    } else if ((sp_mask) == 0xffffffffLL) {                     \
        ESP = (uint32_t)(val);                                  \
    } else {                                                    \
        ESP = (val);                                            \
    }                                                           \
} while (0)
#else
/* 32-bit target: merge the masked new value into the unmasked bits of ESP. */
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
569

  
556 570
/* XXX: add a is_user flag to have proper security support */
557 571
#define PUSHW(ssp, sp, sp_mask, val)\
558 572
{\
......
584 598
{
585 599
    SegmentCache *dt;
586 600
    target_ulong ptr, ssp;
587
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
601
    int type, dpl, selector, ss_dpl, cpl;
588 602
    int has_error_code, new_stack, shift;
589 603
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
590
    uint32_t old_eip;
604
    uint32_t old_eip, sp_mask;
591 605

  
592 606
    has_error_code = 0;
593 607
    if (!is_int && !is_hw) {
......
623 637
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
624 638
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
625 639
        if (has_error_code) {
626
            int mask, type;
640
            int type;
641
            uint32_t mask;
627 642
            /* push the error code */
628 643
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
629 644
            shift = type >> 3;
......
637 652
                stl_kernel(ssp, error_code);
638 653
            else
639 654
                stw_kernel(ssp, error_code);
640
            ESP = (esp & mask) | (ESP & ~mask);
655
            SET_ESP(esp, mask);
641 656
        }
642 657
        return;
643 658
    case 6: /* 286 interrupt gate */
......
765 780
        cpu_x86_load_seg_cache(env, R_SS, ss, 
766 781
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
767 782
    }
768
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);
783
    SET_ESP(esp, sp_mask);
769 784

  
770 785
    selector = (selector & ~3) | dpl;
771 786
    cpu_x86_load_seg_cache(env, R_CS, selector, 
......
2015 2030
        PUSHW(ssp, esp, esp_mask, next_eip);
2016 2031
    }
2017 2032

  
2018
    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
2033
    SET_ESP(esp, esp_mask);
2019 2034
    env->eip = new_eip;
2020 2035
    env->segs[R_CS].selector = new_cs;
2021 2036
    env->segs[R_CS].base = (new_cs << 4);
......
2101 2116
            if (new_eip > limit)
2102 2117
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2103 2118
            /* from this point, not restartable */
2104
            ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2119
            SET_ESP(sp, sp_mask);
2105 2120
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2106 2121
                                   get_seg_base(e1, e2), limit, e2);
2107 2122
            EIP = new_eip;
......
2230 2245
                       get_seg_limit(e1, e2),
2231 2246
                       e2);
2232 2247
        cpu_x86_set_cpl(env, dpl);
2233
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2248
        SET_ESP(sp, sp_mask);
2234 2249
        EIP = offset;
2235 2250
    }
2236 2251
#ifdef USE_KQEMU
......
2459 2474

  
2460 2475
        sp += addend;
2461 2476
    }
2462
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2477
    SET_ESP(sp, sp_mask);
2463 2478
    env->eip = new_eip;
2464 2479
    if (is_iret) {
2465 2480
        /* NOTE: 'cpl' is the _old_ CPL */

Also available in: Unified diff