Revision 5fafdf24 target-i386/helper.c

This revision is a whitespace-only cleanup: every hunk below strips trailing spaces and tabs, with no functional change.

--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -1,6 +1,6 @@
 /*
  *  i386 helpers
- * 
+ *
  *  Copyright (c) 2003 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
@@ -67,7 +67,7 @@
 
 /* modulo 17 table */
 const uint8_t rclw_table[32] = {
-    0, 1, 2, 3, 4, 5, 6, 7, 
+    0, 1, 2, 3, 4, 5, 6, 7,
     8, 9,10,11,12,13,14,15,
    16, 0, 1, 2, 3, 4, 5, 6,
     7, 8, 9,10,11,12,13,14,
@@ -75,9 +75,9 @@
 
 /* modulo 9 table */
 const uint8_t rclb_table[32] = {
-    0, 1, 2, 3, 4, 5, 6, 7, 
+    0, 1, 2, 3, 4, 5, 6, 7,
     8, 0, 1, 2, 3, 4, 5, 6,
-    7, 8, 0, 1, 2, 3, 4, 5, 
+    7, 8, 0, 1, 2, 3, 4, 5,
     6, 7, 8, 0, 1, 2, 3, 4,
 };
 
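Note: the two tables above are count-reduction tables for rotate-through-carry. An 8-bit RCL rotates through 9 bits (8 data bits plus CF) and a 16-bit RCL through 17, so a shift count already masked to 5 bits only needs reducing modulo 9 or 17; the 32-entry lookup replaces a division. A minimal stand-alone check (illustration only, not part of this patch; the table is copied from the file above):

    #include <assert.h>
    #include <stdint.h>

    static const uint8_t rclb_table[32] = {   /* entry i equals i % 9 */
        0, 1, 2, 3, 4, 5, 6, 7,
        8, 0, 1, 2, 3, 4, 5, 6,
        7, 8, 0, 1, 2, 3, 4, 5,
        6, 7, 8, 0, 1, 2, 3, 4,
    };

    int main(void)
    {
        for (int i = 0; i < 32; i++)
            assert(rclb_table[i] == i % 9);   /* mod-9 reduction without a div */
        return 0;
    }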
@@ -91,7 +91,7 @@
     1.44269504088896340739L,  /*l2e*/
     3.32192809488736234781L,  /*l2t*/
 };
-    
+
 /* thread support */
 
 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
@@ -126,7 +126,7 @@
     *e2_ptr = ldl_kernel(ptr + 4);
     return 0;
 }
-                                     
+
 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
 {
     unsigned int limit;
@@ -152,15 +152,15 @@
 static inline void load_seg_vm(int seg, int selector)
 {
     selector &= 0xffff;
-    cpu_x86_load_seg_cache(env, seg, selector, 
+    cpu_x86_load_seg_cache(env, seg, selector,
                            (selector << 4), 0xffff, 0);
 }
 
-static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, 
+static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                        uint32_t *esp_ptr, int dpl)
 {
     int type, index, shift;
-    
+
 #if 0
     {
         int i;
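Note: get_ss_esp_from_tss() above fetches the inner-level stack (SS:ESP) for privilege level dpl from the current TSS. Architecturally a 32-bit TSS keeps ESPn at offset 4 + 8*dpl with SSn right after it, while a 16-bit TSS keeps SPn at 2 + 4*dpl; both fold into one shifted expression. A hedged sketch of that offset computation (tss_sp_offset is an illustrative name, not a helper in this file; shift is 1 for a 32-bit TSS, 0 for a 16-bit one):

    #include <stdint.h>

    /* Byte offset of the saved stack pointer for ring dpl inside a TSS. */
    static inline uint32_t tss_sp_offset(int dpl, int shift)
    {
        /* (4*dpl + 2) << 1 == 8*dpl + 4  (32-bit TSS, ESPn)
           (4*dpl + 2) << 0 == 4*dpl + 2  (16-bit TSS, SPn)  */
        return (uint32_t)((dpl * 4 + 2) << shift);
    }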
@@ -231,12 +231,12 @@
         }
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-        cpu_x86_load_seg_cache(env, seg_reg, selector, 
+        cpu_x86_load_seg_cache(env, seg_reg, selector,
                        get_seg_base(e1, e2),
                        get_seg_limit(e1, e2),
                        e2);
     } else {
-        if (seg_reg == R_SS || seg_reg == R_CS) 
+        if (seg_reg == R_SS || seg_reg == R_CS)
             raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
     }
 }
@@ -246,7 +246,7 @@
 #define SWITCH_TSS_CALL 2
 
 /* XXX: restore CPU state in registers (PowerPC case) */
-static void switch_tss(int tss_selector, 
+static void switch_tss(int tss_selector,
                        uint32_t e1, uint32_t e2, int source,
                        uint32_t next_eip)
 {
@@ -290,7 +290,7 @@
         tss_limit_max = 43;
     tss_limit = get_seg_limit(e1, e2);
     tss_base = get_seg_base(e1, e2);
-    if ((tss_selector & 4) != 0 || 
+    if ((tss_selector & 4) != 0 ||
         tss_limit < tss_limit_max)
         raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
@@ -325,7 +325,7 @@
         new_segs[R_GS] = 0;
         new_trap = 0;
     }
-    
+
     /* NOTE: we must avoid memory exceptions during the task switch,
        so we make dummy accesses before */
     /* XXX: it can still fail in some cases, so a bigger hack is
@@ -335,7 +335,7 @@
     v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
     stb_kernel(env->tr.base, v1);
     stb_kernel(env->tr.base + old_tss_limit_max, v2);
-    
+
     /* clear busy bit (it is restartable) */
     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
         target_ulong ptr;
@@ -348,7 +348,7 @@
     old_eflags = compute_eflags();
     if (source == SWITCH_TSS_IRET)
         old_eflags &= ~NT_MASK;
-    
+
     /* save the current state in the old TSS */
     if (type & 8) {
         /* 32 bit */
@@ -379,7 +379,7 @@
         for(i = 0; i < 4; i++)
             stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
     }
-    
+
     /* now if an exception occurs, it will occurs in the next task
        context */
 
@@ -406,15 +406,15 @@
     env->tr.base = tss_base;
     env->tr.limit = tss_limit;
     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
-    
+
     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
         cpu_x86_update_cr3(env, new_cr3);
     }
-    
+
     /* load all registers without an exception, then reload them with
        possible exception */
     env->eip = new_eip;
-    eflags_mask = TF_MASK | AC_MASK | ID_MASK | 
+    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
     if (!(type & 8))
         eflags_mask &= 0xffff;
@@ -429,7 +429,7 @@
     ESI = new_regs[6];
     EDI = new_regs[7];
     if (new_eflags & VM_MASK) {
-        for(i = 0; i < 6; i++) 
+        for(i = 0; i < 6; i++)
             load_seg_vm(i, new_segs[i]);
         /* in vm86, CPL is always 3 */
         cpu_x86_set_cpl(env, 3);
@@ -440,7 +440,7 @@
         for(i = 0; i < 6; i++)
             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
     }
-    
+
     env->ldt.selector = new_ldt & ~4;
     env->ldt.base = 0;
     env->ldt.limit = 0;
@@ -464,7 +464,7 @@
             raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
         load_seg_cache_raw_dt(&env->ldt, e1, e2);
     }
-    
+
     /* load the segments */
     if (!(new_eflags & VM_MASK)) {
         tss_load_seg(R_CS, new_segs[R_CS]);
@@ -474,7 +474,7 @@
         tss_load_seg(R_FS, new_segs[R_FS]);
         tss_load_seg(R_GS, new_segs[R_GS]);
     }
-    
+
     /* check that EIP is in the CS segment limits */
     if (new_eip > env->segs[R_CS].limit) {
         /* XXX: different exception if CALL ? */
@@ -486,7 +486,7 @@
 static inline void check_io(int addr, int size)
 {
     int io_offset, val, mask;
-    
+
     /* TSS must be a valid 32 bit one */
     if (!(env->tr.flags & DESC_P_MASK) ||
         ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
@@ -760,7 +760,7 @@
             PUSHW(ssp, esp, sp_mask, error_code);
         }
     }
-    
+
     if (new_stack) {
         if (env->eflags & VM_MASK) {
             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
@@ -769,13 +769,13 @@
             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
         }
         ss = (ss & ~3) | dpl;
-        cpu_x86_load_seg_cache(env, R_SS, ss, 
+        cpu_x86_load_seg_cache(env, R_SS, ss,
                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
     }
     SET_ESP(esp, sp_mask);
 
     selector = (selector & ~3) | dpl;
-    cpu_x86_load_seg_cache(env, R_CS, selector, 
+    cpu_x86_load_seg_cache(env, R_CS, selector,
                    get_seg_base(e1, e2),
                    get_seg_limit(e1, e2),
                    e2);
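Note: both loads above stamp the destination privilege level into the selector's RPL field (its two low bits) before installing the segment: (sel & ~3) | dpl. Spelled out as a stand-alone helper (illustrative name, not from this file):

    #include <stdint.h>

    /* Replace a selector's RPL (bits 1:0) with the target privilege level. */
    static inline uint16_t sel_with_rpl(uint16_t sel, unsigned dpl)
    {
        return (uint16_t)((sel & ~3u) | (dpl & 3u));
    }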
@@ -806,9 +806,9 @@
 static inline target_ulong get_rsp_from_tss(int level)
 {
     int index;
-    
+
 #if 0
-    printf("TR: base=" TARGET_FMT_lx " limit=%x\n", 
+    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
            env->tr.base, env->tr.limit);
 #endif
 
@@ -926,7 +926,7 @@
     if (has_error_code) {
         PUSHQ(esp, error_code);
     }
-    
+
     if (new_stack) {
         ss = 0 | dpl;
         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
@@ -934,7 +934,7 @@
     ESP = esp;
 
     selector = (selector & ~3) | dpl;
-    cpu_x86_load_seg_cache(env, R_CS, selector, 
+    cpu_x86_load_seg_cache(env, R_CS, selector,
                    get_seg_base(e1, e2),
                    get_seg_limit(e1, e2),
                    e2);
@@ -963,16 +963,16 @@
 
         ECX = env->eip + next_eip_addend;
         env->regs[11] = compute_eflags();
-        
+
         code64 = env->hflags & HF_CS64_MASK;
 
         cpu_x86_set_cpl(env, 0);
-        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 
-                           0, 0xffffffff, 
+        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+                           0, 0xffffffff,
                                DESC_G_MASK | DESC_P_MASK |
                                DESC_S_MASK |
                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
-        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK |
@@ -982,18 +982,18 @@
             env->eip = env->lstar;
         else
             env->eip = env->cstar;
-    } else 
+    } else
 #endif
     {
         ECX = (uint32_t)(env->eip + next_eip_addend);
-        
+
         cpu_x86_set_cpl(env, 0);
-        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 
-                           0, 0xffffffff, 
+        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+                           0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK |
                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
-        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK |
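Note: in the SYSCALL paths above, CS is loaded from a selector taken from the STAR MSR and SS is implicitly that selector + 8, with fixed flat-segment attributes instead of a GDT lookup. On x86-64 the SYSCALL base selector lives in STAR bits 47:32 (SYSRET's in bits 63:48). A hedged illustration of the selector arithmetic only (msr_star is a stand-in parameter, not a name from this file):

    #include <stdint.h>

    static void star_selectors(uint64_t msr_star,
                               uint16_t *syscall_cs, uint16_t *syscall_ss)
    {
        uint16_t sel = (uint16_t)(msr_star >> 32);  /* STAR[47:32] */
        *syscall_cs = sel & 0xfffc;                 /* TI/RPL bits cleared */
        *syscall_ss = (sel + 8) & 0xfffc;           /* SS follows CS in the GDT */
    }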
@@ -1018,39 +1018,39 @@
 #ifdef TARGET_X86_64
     if (env->hflags & HF_LMA_MASK) {
         if (dflag == 2) {
-            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 
-                                   0, 0xffffffff, 
+            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
+                                   0, 0xffffffff,
                                    DESC_G_MASK | DESC_P_MASK |
                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
-                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 
+                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                    DESC_L_MASK);
             env->eip = ECX;
         } else {
-            cpu_x86_load_seg_cache(env, R_CS, selector | 3, 
-                                   0, 0xffffffff, 
+            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+                                   0, 0xffffffff,
                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
             env->eip = (uint32_t)ECX;
         }
-        cpu_x86_load_seg_cache(env, R_SS, selector + 8, 
+        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                DESC_W_MASK | DESC_A_MASK);
-        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK | 
+        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                     IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
         cpu_x86_set_cpl(env, 3);
-    } else 
+    } else
 #endif
     {
-        cpu_x86_load_seg_cache(env, R_CS, selector | 3, 
-                               0, 0xffffffff, 
+        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+                               0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
         env->eip = (uint32_t)ECX;
-        cpu_x86_load_seg_cache(env, R_SS, selector + 8, 
+        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
@@ -1096,7 +1096,7 @@
     PUSHW(ssp, esp, 0xffff, compute_eflags());
     PUSHW(ssp, esp, 0xffff, old_cs);
     PUSHW(ssp, esp, 0xffff, old_eip);
-    
+
     /* update processor state */
     ESP = (ESP & ~0xffff) | (esp & 0xffff);
     env->eip = offset;
@@ -1106,7 +1106,7 @@
 }
 
 /* fake user mode interrupt */
-void do_interrupt_user(int intno, int is_int, int error_code, 
+void do_interrupt_user(int intno, int is_int, int error_code,
                        target_ulong next_eip)
 {
     SegmentCache *dt;
@@ -1117,7 +1117,7 @@
     dt = &env->idt;
     ptr = dt->base + (intno * 8);
     e2 = ldl_kernel(ptr + 4);
-    
+
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
     cpl = env->hflags & HF_CPL_MASK;
     /* check privledge if software int */
@@ -1134,9 +1134,9 @@
 /*
  * Begin execution of an interruption. is_int is TRUE if coming from
  * the int instruction. next_eip is the EIP value AFTER the interrupt
- * instruction. It is only relevant if is_int is TRUE.  
+ * instruction. It is only relevant if is_int is TRUE.
  */
-void do_interrupt(int intno, int is_int, int error_code, 
+void do_interrupt(int intno, int is_int, int error_code,
                   target_ulong next_eip, int is_hw)
 {
     if (loglevel & CPU_LOG_INT) {
@@ -1222,9 +1222,9 @@
  * Signal an interruption. It is executed in the main CPU loop.
  * is_int is TRUE if coming from the int instruction. next_eip is the
  * EIP value AFTER the interrupt instruction. It is only relevant if
- * is_int is TRUE.  
+ * is_int is TRUE.
  */
-void raise_interrupt(int intno, int is_int, int error_code, 
+void raise_interrupt(int intno, int is_int, int error_code,
                      int next_eip_addend)
 {
     if (!is_int)
@@ -1263,7 +1263,7 @@
 
 /* SMM support */
 
-#if defined(CONFIG_USER_ONLY) 
+#if defined(CONFIG_USER_ONLY)
 
 void do_smm_enter(void)
 {
@@ -1296,7 +1296,7 @@
     cpu_smm_update(env);
 
     sm_state = env->smbase + 0x8000;
-    
+
 #ifdef TARGET_X86_64
     for(i = 0; i < 6; i++) {
         dt = &env->segs[i];
@@ -1314,7 +1314,7 @@
     stq_phys(sm_state + 0x7e78, env->ldt.base);
     stl_phys(sm_state + 0x7e74, env->ldt.limit);
     stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
-    
+
     stq_phys(sm_state + 0x7e88, env->idt.base);
     stl_phys(sm_state + 0x7e84, env->idt.limit);
 
@@ -1322,7 +1322,7 @@
     stq_phys(sm_state + 0x7e98, env->tr.base);
     stl_phys(sm_state + 0x7e94, env->tr.limit);
     stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
-    
+
     stq_phys(sm_state + 0x7ed0, env->efer);
 
     stq_phys(sm_state + 0x7ff8, EAX);
@@ -1333,7 +1333,7 @@
     stq_phys(sm_state + 0x7fd0, EBP);
     stq_phys(sm_state + 0x7fc8, ESI);
     stq_phys(sm_state + 0x7fc0, EDI);
-    for(i = 8; i < 16; i++) 
+    for(i = 8; i < 16; i++)
         stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
     stq_phys(sm_state + 0x7f78, env->eip);
     stl_phys(sm_state + 0x7f70, compute_eflags());
@@ -1361,17 +1361,17 @@
     stl_phys(sm_state + 0x7fd0, EAX);
     stl_phys(sm_state + 0x7fcc, env->dr[6]);
     stl_phys(sm_state + 0x7fc8, env->dr[7]);
-    
+
     stl_phys(sm_state + 0x7fc4, env->tr.selector);
     stl_phys(sm_state + 0x7f64, env->tr.base);
     stl_phys(sm_state + 0x7f60, env->tr.limit);
     stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
-    
+
     stl_phys(sm_state + 0x7fc0, env->ldt.selector);
     stl_phys(sm_state + 0x7f80, env->ldt.base);
     stl_phys(sm_state + 0x7f7c, env->ldt.limit);
     stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
-    
+
     stl_phys(sm_state + 0x7f74, env->gdt.base);
     stl_phys(sm_state + 0x7f70, env->gdt.limit);
 
@@ -1409,8 +1409,8 @@
     cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
     cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
     cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
-    
-    cpu_x86_update_cr0(env, 
+
+    cpu_x86_update_cr0(env,
                        env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
     cpu_x86_update_cr4(env, 0);
     env->dr[7] = 0x00000400;
@@ -1433,7 +1433,7 @@
 
     for(i = 0; i < 6; i++) {
         offset = 0x7e00 + i * 16;
-        cpu_x86_load_seg_cache(env, i, 
+        cpu_x86_load_seg_cache(env, i,
                                lduw_phys(sm_state + offset),
                                ldq_phys(sm_state + offset + 8),
                                ldl_phys(sm_state + offset + 4),
@@ -1447,7 +1447,7 @@
     env->ldt.base = ldq_phys(sm_state + 0x7e78);
     env->ldt.limit = ldl_phys(sm_state + 0x7e74);
     env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
-    
+
     env->idt.base = ldq_phys(sm_state + 0x7e88);
     env->idt.limit = ldl_phys(sm_state + 0x7e84);
 
@@ -1455,7 +1455,7 @@
     env->tr.base = ldq_phys(sm_state + 0x7e98);
     env->tr.limit = ldl_phys(sm_state + 0x7e94);
     env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
-    
+
     EAX = ldq_phys(sm_state + 0x7ff8);
     ECX = ldq_phys(sm_state + 0x7ff0);
     EDX = ldq_phys(sm_state + 0x7fe8);
@@ -1464,10 +1464,10 @@
     EBP = ldq_phys(sm_state + 0x7fd0);
     ESI = ldq_phys(sm_state + 0x7fc8);
     EDI = ldq_phys(sm_state + 0x7fc0);
-    for(i = 8; i < 16; i++) 
+    for(i = 8; i < 16; i++)
         env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
     env->eip = ldq_phys(sm_state + 0x7f78);
-    load_eflags(ldl_phys(sm_state + 0x7f70), 
+    load_eflags(ldl_phys(sm_state + 0x7f70),
                 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
     env->dr[6] = ldl_phys(sm_state + 0x7f68);
     env->dr[7] = ldl_phys(sm_state + 0x7f60);
@@ -1483,7 +1483,7 @@
 #else
     cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
     cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
-    load_eflags(ldl_phys(sm_state + 0x7ff4), 
+    load_eflags(ldl_phys(sm_state + 0x7ff4),
                 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
     env->eip = ldl_phys(sm_state + 0x7ff0);
     EDI = ldl_phys(sm_state + 0x7fec);
@@ -1496,17 +1496,17 @@
     EAX = ldl_phys(sm_state + 0x7fd0);
     env->dr[6] = ldl_phys(sm_state + 0x7fcc);
     env->dr[7] = ldl_phys(sm_state + 0x7fc8);
-    
+
     env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
     env->tr.base = ldl_phys(sm_state + 0x7f64);
     env->tr.limit = ldl_phys(sm_state + 0x7f60);
     env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
-    
+
     env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
     env->ldt.base = ldl_phys(sm_state + 0x7f80);
     env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
     env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
-    
+
     env->gdt.base = ldl_phys(sm_state + 0x7f74);
     env->gdt.limit = ldl_phys(sm_state + 0x7f70);
 
@@ -1518,7 +1518,7 @@
             offset = 0x7f84 + i * 12;
         else
             offset = 0x7f2c + (i - 3) * 12;
-        cpu_x86_load_seg_cache(env, i, 
+        cpu_x86_load_seg_cache(env, i,
                                ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                                ldl_phys(sm_state + offset + 8),
                                ldl_phys(sm_state + offset + 4),
@@ -1564,7 +1564,7 @@
 {
     unsigned int den, r;
     uint64_t num, q;
-    
+
     num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
     den = T0;
     if (den == 0) {
@@ -1586,7 +1586,7 @@
 {
     int den, r;
     int64_t num, q;
-    
+
     num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
     den = T0;
     if (den == 0) {
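Note: both division helpers above assemble their 64-bit dividend the same way: EDX supplies the high 32 bits and EAX the low 32, exactly as the hardware DIV/IDIV r/m32 instructions define it. Stand-alone restatement (illustrative helper, not from this file):

    #include <stdint.h>

    /* Compose the EDX:EAX register pair into the 64-bit dividend. */
    static uint64_t edx_eax_to_u64(uint32_t eax, uint32_t edx)
    {
        return (uint64_t)eax | ((uint64_t)edx << 32);
    }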
@@ -1632,16 +1632,16 @@
 {
     uint32_t index;
     index = (uint32_t)EAX;
-    
+
     /* test if maximum index reached */
     if (index & 0x80000000) {
-        if (index > env->cpuid_xlevel) 
+        if (index > env->cpuid_xlevel)
             index = env->cpuid_level;
     } else {
-        if (index > env->cpuid_level) 
+        if (index > env->cpuid_level)
             index = env->cpuid_level;
     }
-        
+
     switch(index) {
     case 0:
         EAX = env->cpuid_level;
@@ -1783,7 +1783,7 @@
     uint32_t e1, e2;
     int index, entry_limit;
     target_ulong ptr;
-    
+
     selector = T0 & 0xffff;
     if ((selector & 0xfffc) == 0) {
         /* XXX: NULL selector case: invalid LDT */
@@ -1798,7 +1798,7 @@
         if (env->hflags & HF_LMA_MASK)
             entry_limit = 15;
         else
-#endif            
+#endif
             entry_limit = 7;
         if ((index + entry_limit) > dt->limit)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
@@ -1831,7 +1831,7 @@
     uint32_t e1, e2;
     int index, type, entry_limit;
     target_ulong ptr;
-    
+
     selector = T0 & 0xffff;
     if ((selector & 0xfffc) == 0) {
         /* NULL selector case: invalid TR */
@@ -1847,7 +1847,7 @@
         if (env->hflags & HF_LMA_MASK)
             entry_limit = 15;
         else
-#endif            
+#endif
             entry_limit = 7;
         if ((index + entry_limit) > dt->limit)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
@@ -1855,7 +1855,7 @@
         e1 = ldl_kernel(ptr);
         e2 = ldl_kernel(ptr + 4);
         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
-        if ((e2 & DESC_S_MASK) || 
+        if ((e2 & DESC_S_MASK) ||
             (type != 1 && type != 9))
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         if (!(e2 & DESC_P_MASK))
@@ -1869,7 +1869,7 @@
                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
             load_seg_cache_raw_dt(&env->tr, e1, e2);
             env->tr.base |= (target_ulong)e3 << 32;
-        } else 
+        } else
 #endif
         {
             load_seg_cache_raw_dt(&env->tr, e1, e2);
@@ -1901,7 +1901,7 @@
             raise_exception_err(EXCP0D_GPF, 0);
         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
     } else {
-        
+
         if (selector & 0x4)
             dt = &env->ldt;
         else
@@ -1912,7 +1912,7 @@
         ptr = dt->base + index;
         e1 = ldl_kernel(ptr);
         e2 = ldl_kernel(ptr + 4);
-        
+
         if (!(e2 & DESC_S_MASK))
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         rpl = selector & 3;
@@ -1927,10 +1927,10 @@
             /* must be readable segment */
             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-            
+
             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                 /* if not conforming code, test rights */
-                if (dpl < cpl || dpl < rpl) 
+                if (dpl < cpl || dpl < rpl)
                     raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
             }
         }
@@ -1948,12 +1948,12 @@
             stl_kernel(ptr + 4, e2);
         }
 
-        cpu_x86_load_seg_cache(env, seg_reg, selector, 
+        cpu_x86_load_seg_cache(env, seg_reg, selector,
                        get_seg_base(e1, e2),
                        get_seg_limit(e1, e2),
                        e2);
 #if 0
-        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 
+        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
 #endif
     }
@@ -1965,7 +1965,7 @@
     int new_cs, gate_cs, type;
     uint32_t e1, e2, cpl, dpl, rpl, limit;
     target_ulong new_eip, next_eip;
-    
+
     new_cs = T0;
     new_eip = T1;
     if ((new_cs & 0xfffc) == 0)
@@ -1992,7 +1992,7 @@
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
         limit = get_seg_limit(e1, e2);
-        if (new_eip > limit && 
+        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
@@ -2028,10 +2028,10 @@
                 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
             /* must be code segment */
-            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 
+            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                  (DESC_S_MASK | DESC_CS_MASK)))
                 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
-            if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 
+            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
             if (!(e2 & DESC_P_MASK))
@@ -2084,7 +2084,7 @@
     uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
     uint32_t val, limit, old_sp_mask;
     target_ulong ssp, old_ssp, next_eip, new_eip;
-    
+
     new_cs = T0;
     new_eip = T1;
     next_eip = env->eip + next_eip_addend;
@@ -2135,10 +2135,10 @@
             /* from this point, not restartable */
             ESP = rsp;
             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
-                                   get_seg_base(e1, e2), 
+                                   get_seg_base(e1, e2),
                                    get_seg_limit(e1, e2), e2);
             EIP = new_eip;
-        } else 
+        } else
 #endif
         {
             sp = ESP;
@@ -2151,7 +2151,7 @@
                 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                 PUSHW(ssp, sp, sp_mask, next_eip);
             }
-            
+
             limit = get_seg_limit(e1, e2);
             if (new_eip > limit)
                 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -2210,7 +2210,7 @@
             get_ss_esp_from_tss(&ss, &sp, dpl);
 #ifdef DEBUG_PCALL
             if (loglevel & CPU_LOG_PCALL)
-                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n", 
+                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                         ss, sp, param_count, ESP);
 #endif
             if ((ss & 0xfffc) == 0)
@@ -2228,12 +2228,12 @@
                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
             if (!(ss_e2 & DESC_P_MASK))
                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
-            
+
             //            push_size = ((param_count * 2) + 8) << shift;
 
             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
             old_ssp = env->segs[R_SS].base;
-            
+
             sp_mask = get_sp_mask(ss_e2);
             ssp = get_seg_base(ss_e1, ss_e2);
             if (shift) {
@@ -2273,14 +2273,14 @@
 
         if (new_stack) {
             ss = (ss & ~3) | dpl;
-            cpu_x86_load_seg_cache(env, R_SS, ss, 
+            cpu_x86_load_seg_cache(env, R_SS, ss,
                                    ssp,
                                    get_seg_limit(ss_e1, ss_e2),
                                    ss_e2);
         }
 
         selector = (selector & ~3) | dpl;
-        cpu_x86_load_seg_cache(env, R_CS, selector, 
+        cpu_x86_load_seg_cache(env, R_CS, selector,
                        get_seg_base(e1, e2),
                        get_seg_limit(e1, e2),
                        e2);
@@ -2338,7 +2338,7 @@
     /* XXX: on x86_64, we do not want to nullify FS and GS because
        they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
-    if ((seg_reg == R_FS || seg_reg == R_GS) && 
+    if ((seg_reg == R_FS || seg_reg == R_GS) &&
         (env->segs[seg_reg].selector & 0xfffc) == 0)
         return;
 
@@ -2360,7 +2360,7 @@
     uint32_t e1, e2, ss_e1, ss_e2;
     int cpl, dpl, rpl, eflags_mask, iopl;
     target_ulong ssp, sp, new_eip, new_esp, sp_mask;
-    
+
 #ifdef TARGET_X86_64
     if (shift == 2)
         sp_mask = -1;
@@ -2412,7 +2412,7 @@
         !(e2 & DESC_CS_MASK))
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
     cpl = env->hflags & HF_CPL_MASK;
-    rpl = new_cs & 3; 
+    rpl = new_cs & 3;
     if (rpl < cpl)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
@@ -2425,12 +2425,12 @@
     }
     if (!(e2 & DESC_P_MASK))
         raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
-    
+
     sp += addend;
-    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 
+    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
         /* return to same priledge level */
-        cpu_x86_load_seg_cache(env, R_CS, new_cs, 
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                        get_seg_base(e1, e2),
                        get_seg_limit(e1, e2),
                        e2);
@@ -2464,13 +2464,13 @@
             /* NULL ss is allowed in long mode if cpl != 3*/
             /* XXX: test CS64 ? */
             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
-                cpu_x86_load_seg_cache(env, R_SS, new_ss, 
+                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                        0, 0xffffffff,
                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                        DESC_W_MASK | DESC_A_MASK);
                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
-            } else 
+            } else
 #endif
             {
                 raise_exception_err(EXCP0D_GPF, 0);
@@ -2489,13 +2489,13 @@
                 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
             if (!(ss_e2 & DESC_P_MASK))
                 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
-            cpu_x86_load_seg_cache(env, R_SS, new_ss, 
+            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                    get_seg_base(ss_e1, ss_e2),
                                    get_seg_limit(ss_e1, ss_e2),
                                    ss_e2);
         }
 
-        cpu_x86_load_seg_cache(env, R_CS, new_cs, 
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                        get_seg_base(e1, e2),
                        get_seg_limit(e1, e2),
                        e2);
@@ -2539,9 +2539,9 @@
     POPL(ssp, sp, sp_mask, new_ds);
     POPL(ssp, sp, sp_mask, new_fs);
     POPL(ssp, sp, sp_mask, new_gs);
-    
+
     /* modify processor state */
-    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK | 
+    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
     load_seg_vm(R_CS, new_cs & 0xffff);
     cpu_x86_set_cpl(env, 3);
@@ -2559,7 +2559,7 @@
 {
     int tss_selector, type;
     uint32_t e1, e2;
-    
+
     /* specific case for TSS */
     if (env->eflags & NT_MASK) {
 #ifdef TARGET_X86_64
@@ -2606,12 +2606,12 @@
     }
     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
     cpu_x86_set_cpl(env, 0);
-    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 
-                           0, 0xffffffff, 
+    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+                           0, 0xffffffff,
                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                            DESC_S_MASK |
                            DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
-    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 
+    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                            0, 0xffffffff,
                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                            DESC_S_MASK |
@@ -2629,12 +2629,12 @@
         raise_exception_err(EXCP0D_GPF, 0);
     }
     cpu_x86_set_cpl(env, 3);
-    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, 
-                           0, 0xffffffff, 
+    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
+                           0, 0xffffffff,
                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                            DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                            DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
-    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, 
+    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                            0, 0xffffffff,
                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                            DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
@@ -2651,7 +2651,7 @@
 
 void helper_movl_crN_T0(int reg)
 {
-#if !defined(CONFIG_USER_ONLY) 
+#if !defined(CONFIG_USER_ONLY)
     switch(reg) {
     case 0:
         cpu_x86_update_cr0(env, T0);
@@ -2695,7 +2695,7 @@
     EDX = (uint32_t)(val >> 32);
 }
 
-#if defined(CONFIG_USER_ONLY) 
+#if defined(CONFIG_USER_ONLY)
 void helper_wrmsr(void)
 {
 }
@@ -2735,7 +2735,7 @@
                 update_mask |= MSR_EFER_FFXSR;
             if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                 update_mask |= MSR_EFER_NXE;
-            env->efer = (env->efer & ~update_mask) | 
+            env->efer = (env->efer & ~update_mask) |
             (val & update_mask);
         }
         break;
@@ -2767,7 +2767,7 @@
 #endif
     default:
         /* XXX: exception ? */
-        break; 
+        break;
     }
 }
 
@@ -2819,7 +2819,7 @@
     default:
         /* XXX: exception ? */
         val = 0;
-        break; 
+        break;
     }
     EAX = (uint32_t)(val);
     EDX = (uint32_t)(val >> 32);
@@ -3006,7 +3006,7 @@
 
 CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
 {
-    if (b == 0.0) 
+    if (b == 0.0)
         fpu_set_exception(FPUS_ZE);
     return a / b;
 }
@@ -3015,8 +3015,8 @@
 {
     if (env->cr[0] & CR0_NE_MASK) {
         raise_exception(EXCP10_COPR);
-    } 
-#if !defined(CONFIG_USER_ONLY) 
+    }
+#if !defined(CONFIG_USER_ONLY)
     else {
         cpu_set_ferr(env);
     }
@@ -3080,13 +3080,13 @@
 void helper_fyl2x(void)
 {
     CPU86_LDouble fptemp;
-    
+
     fptemp = ST0;
     if (fptemp>0.0){
         fptemp = log(fptemp)/log(2.0);	 /* log2(ST) */
         ST1 *= fptemp;
         fpop();
-    } else { 
+    } else {
         env->fpus &= (~0x4700);
         env->fpus |= 0x400;
     }
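Note: helper_fyl2x() above implements FYL2X, ST1 := ST1 * log2(ST0) followed by a pop, deriving log2 from natural logarithms via log2(x) = ln(x) / ln(2). The bare math, minus the FPU-stack and status-word handling (illustration only):

    #include <math.h>

    /* FYL2X core: y * log2(x), with log2 built from natural logs. */
    static double fyl2x(double x, double y)
    {
        return y * (log(x) / log(2.0));
    }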
@@ -3250,7 +3250,7 @@
         fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
         ST1 *= fptemp;
         fpop();
-    } else { 
+    } else {
         env->fpus &= (~0x4700);
         env->fpus |= 0x400;
     }
@@ -3261,7 +3261,7 @@
     CPU86_LDouble fptemp;
 
     fptemp = ST0;
-    if (fptemp<0.0) { 
+    if (fptemp<0.0) {
         env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
         env->fpus |= 0x400;
     }
@@ -3291,7 +3291,7 @@
 
 void helper_fscale(void)
 {
-    ST0 = ldexp (ST0, (int)(ST1)); 
+    ST0 = ldexp (ST0, (int)(ST1));
 }
 
 void helper_fsin(void)
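Note: helper_fscale() maps FSCALE straight onto C's ldexp(): ST0 is scaled by 2 raised to ST1 truncated toward zero (the (int) cast). Illustration only:

    #include <assert.h>
    #include <math.h>

    int main(void)
    {
        assert(ldexp(3.0, 4) == 48.0);    /* 3 * 2^4, what FSCALE computes */
        assert(ldexp(1.0, -3) == 0.125);  /* negative scale factors work too */
        return 0;
    }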
@@ -3490,7 +3490,7 @@
         helper_fstt(tmp, addr);
         addr += 16;
     }
-    
+
     if (env->cr[4] & CR4_OSFXSR_MASK) {
         /* XXX: finish it */
         stl(ptr + 0x18, env->mxcsr); /* mxcsr */
@@ -3823,7 +3823,7 @@
 #endif
 }
 
-#if !defined(CONFIG_USER_ONLY) 
+#if !defined(CONFIG_USER_ONLY)
 
 #define MMUSUFFIX _mmu
 #define GETPC() (__builtin_return_address(0))
