Revision 90a9fdae helper-i386.c

--- a/helper-i386.c
+++ b/helper-i386.c
@@ -126,17 +126,74 @@
     longjmp(env->jmp_env, 1);
 }
 
+static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, 
+                                       uint32_t *esp_ptr, int dpl)
+{
+    int type, index, shift;
+
 #if 0
-/* full interrupt support (only useful for real CPU emulation, not
-   finished) - I won't do it any time soon, finish it if you want ! */
-void raise_interrupt(int intno, int is_int, int error_code, 
-                     unsigned int next_eip)
+    {
+        int i;
+        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
+        for(i=0;i<env->tr.limit;i++) {
+            printf("%02x ", env->tr.base[i]);
+            if ((i & 7) == 7) printf("\n");
+        }
+        printf("\n");
+    }
+#endif
+
+    if (!(env->tr.flags & DESC_P_MASK))
+        cpu_abort(env, "invalid tss");
+    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+    if ((type & 7) != 1)
+        cpu_abort(env, "invalid tss type");
+    shift = type >> 3;
+    index = (dpl * 4 + 2) << shift;
+    if (index + (4 << shift) - 1 > env->tr.limit)
+        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
+    if (shift == 0) {
+        *esp_ptr = lduw(env->tr.base + index);
+        *ss_ptr = lduw(env->tr.base + index + 2);
+    } else {
+        *esp_ptr = ldl(env->tr.base + index);
+        *ss_ptr = lduw(env->tr.base + index + 4);
+    }
+}
+
+/* return non-zero on error */
+static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
+                               int selector)
 {
-    SegmentDescriptorTable *dt;
+    SegmentCache *dt;
+    int index;
     uint8_t *ptr;
-    int type, dpl, cpl;
-    uint32_t e1, e2;
-    
+
+    if (selector & 0x4)
+        dt = &env->ldt;
+    else
+        dt = &env->gdt;
+    index = selector & ~7;
+    if ((index + 7) > dt->limit)
+        return -1;
+    ptr = dt->base + index;
+    *e1_ptr = ldl(ptr);
+    *e2_ptr = ldl(ptr + 4);
+    return 0;
+}
+
+
+/* protected mode interrupt */
+static void do_interrupt_protected(int intno, int is_int, int error_code,
+                                      unsigned int next_eip)
+{
+    SegmentCache *dt;
+    uint8_t *ptr, *ssp;
+    int type, dpl, cpl, selector, ss_dpl;
+    int has_error_code, new_stack, shift;
+    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
+    uint32_t old_cs, old_ss, old_esp, old_eip;
+
     dt = &env->idt;
     if (intno * 8 + 7 > dt->limit)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
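For reference, the dpl * 4 + 2 arithmetic in get_ss_esp_from_tss() matches the 286 and 386 TSS layouts: the inner-level SS:(E)SP pairs start 2 (or 4) bytes into the TSS and are spaced 4 (or 8) bytes apart. A standalone sketch, not part of the patch, that only prints the offsets the code would read:

    #include <stdio.h>

    int main(void)
    {
        int shift, dpl;

        /* shift = 0: 16-bit (286) TSS; shift = 1: 32-bit (386) TSS */
        for (shift = 0; shift <= 1; shift++) {
            for (dpl = 0; dpl < 3; dpl++) {
                int index = (dpl * 4 + 2) << shift;
                /* 16-bit TSS: SP at index, SS at index + 2.
                   32-bit TSS: ESP at index, SS at index + 4. */
                printf("%d-bit TSS, dpl=%d: esp at +%d, ss at +%d\n",
                       shift ? 32 : 16, dpl,
                       index, index + (shift ? 4 : 2));
            }
        }
        return 0;
    }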
@@ -147,6 +204,8 @@
     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
     switch(type) {
     case 5: /* task gate */
+        cpu_abort(env, "task gate not supported");
+        break;
     case 6: /* 286 interrupt gate */
     case 7: /* 286 trap gate */
     case 14: /* 386 interrupt gate */
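The gate types dispatched here follow the usual x86 encoding, which the code exploits later: bit 0 distinguishes trap gates (IF left set) from interrupt gates (IF cleared), and bit 3 distinguishes 16-bit from 32-bit gates (shift = type >> 3). A small illustrative sketch, assuming only that encoding:

    #include <stdio.h>

    int main(void)
    {
        int types[] = { 6, 7, 14, 15 };
        int i;

        for (i = 0; i < 4; i++) {
            int type = types[i];
            /* bit 3: operand size of the gate; bit 0: trap vs interrupt */
            printf("type %2d: %d-bit %s gate\n", type,
                   (type >> 3) ? 32 : 16,
                   (type & 1) ? "trap" : "interrupt");
        }
        return 0;
    }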
@@ -164,17 +223,184 @@
     /* check valid bit */
     if (!(e2 & DESC_P_MASK))
         raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
+    selector = e1 >> 16;
+    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+    if ((selector & 0xfffc) == 0)
+        raise_exception_err(EXCP0D_GPF, 0);
+
+    if (load_segment(&e1, &e2, selector) != 0)
+        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
+        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    if (dpl > cpl)
+        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+    if (!(e2 & DESC_P_MASK))
+        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+        /* to inner privilege */
+        get_ss_esp_from_tss(&ss, &esp, dpl);
+        if ((ss & 0xfffc) == 0)
+            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+        if ((ss & 3) != dpl)
+            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
+            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+        if (ss_dpl != dpl)
+            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+        if (!(ss_e2 & DESC_S_MASK) ||
+            (ss_e2 & DESC_CS_MASK) ||
+            !(ss_e2 & DESC_W_MASK))
+            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+        if (!(ss_e2 & DESC_P_MASK))
+            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+        new_stack = 1;
+    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+        /* to same privilege */
+        new_stack = 0;
+    } else {
+        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+        new_stack = 0; /* avoid warning */
+    }
+
+    shift = type >> 3;
+    has_error_code = 0;
+    if (!is_int) {
+        switch(intno) {
+        case 8:
+        case 10:
+        case 11:
+        case 12:
+        case 13:
+        case 14:
+        case 17:
+            has_error_code = 1;
+            break;
+        }
+    }
+    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
+    if (env->eflags & VM_MASK)
+        push_size += 8;
+    push_size <<= shift;
+
+    /* XXX: check that enough room is available */
+    if (new_stack) {
+        old_esp = env->regs[R_ESP];
+        old_ss = env->segs[R_SS].selector;
+        load_seg(R_SS, ss, env->eip);
+    } else {
+        old_esp = 0;
+        old_ss = 0;
+        esp = env->regs[R_ESP];
+    }
+    if (is_int)
+        old_eip = next_eip;
+    else
+        old_eip = env->eip;
+    old_cs = env->segs[R_CS].selector;
+    load_seg(R_CS, selector, env->eip);
+    env->eip = offset;
+    env->regs[R_ESP] = esp - push_size;
+    ssp = env->segs[R_SS].base + esp;
+    if (shift == 1) {
+        int old_eflags;
+        if (env->eflags & VM_MASK) {
+            ssp -= 4;
+            stl(ssp, env->segs[R_GS].selector);
+            ssp -= 4;
+            stl(ssp, env->segs[R_FS].selector);
+            ssp -= 4;
+            stl(ssp, env->segs[R_DS].selector);
+            ssp -= 4;
+            stl(ssp, env->segs[R_ES].selector);
+        }
+        if (new_stack) {
+            ssp -= 4;
+            stl(ssp, old_ss);
+            ssp -= 4;
+            stl(ssp, old_esp);
+        }
+        ssp -= 4;
+        old_eflags = compute_eflags();
+        stl(ssp, old_eflags);
+        ssp -= 4;
+        stl(ssp, old_cs);
+        ssp -= 4;
+        stl(ssp, old_eip);
+        if (has_error_code) {
+            ssp -= 4;
+            stl(ssp, error_code);
+        }
+    } else {
+        if (new_stack) {
+            ssp -= 2;
+            stw(ssp, old_ss);
+            ssp -= 2;
+            stw(ssp, old_esp);
+        }
+        ssp -= 2;
+        stw(ssp, compute_eflags());
+        ssp -= 2;
+        stw(ssp, old_cs);
+        ssp -= 2;
+        stw(ssp, old_eip);
+        if (has_error_code) {
+            ssp -= 2;
+            stw(ssp, error_code);
+        }
+    }
+
+    /* interrupt gates clear the IF mask */
+    if ((type & 1) == 0) {
+        env->eflags &= ~IF_MASK;
+    }
+    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
 }
 
-#else
+/* real mode interrupt */
+static void do_interrupt_real(int intno, int is_int, int error_code,
+                                 unsigned int next_eip)
+{
+    SegmentCache *dt;
+    uint8_t *ptr, *ssp;
+    int selector;
+    uint32_t offset, esp;
+    uint32_t old_cs, old_eip;
 
-/*
- * is_int is TRUE if coming from the int instruction. next_eip is the
- * EIP value AFTER the interrupt instruction. It is only relevant if
- * is_int is TRUE.  
- */
-void raise_interrupt(int intno, int is_int, int error_code, 
-                     unsigned int next_eip)
+    /* real mode (simpler !) */
+    dt = &env->idt;
+    if (intno * 4 + 3 > dt->limit)
+        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
+    ptr = dt->base + intno * 4;
+    offset = lduw(ptr);
+    selector = lduw(ptr + 2);
+    esp = env->regs[R_ESP] & 0xffff;
+    ssp = env->segs[R_SS].base + esp;
+    if (is_int)
+        old_eip = next_eip;
+    else
+        old_eip = env->eip;
+    old_cs = env->segs[R_CS].selector;
+    ssp -= 2;
+    stw(ssp, compute_eflags());
+    ssp -= 2;
+    stw(ssp, old_cs);
+    ssp -= 2;
+    stw(ssp, old_eip);
+    esp -= 6;
+
+    /* update processor state */
+    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
+    env->eip = offset;
+    env->segs[R_CS].selector = selector;
+    env->segs[R_CS].base = (uint8_t *)(selector << 4);
+    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
+}
+
+/* fake user mode interrupt */
+void do_interrupt_user(int intno, int is_int, int error_code, 
+                       unsigned int next_eip)
 {
     SegmentCache *dt;
     uint8_t *ptr;
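push_size in do_interrupt_protected() counts the frame in bytes for a 16-bit gate and doubles it for a 32-bit one: 6 for EFLAGS/CS/EIP, plus 4 for SS/ESP on a stack switch, plus 2 for an error code, plus 8 for the vm86 segment selectors. A standalone sketch of the same arithmetic (the helper name is mine, not the patch's):

    #include <stdio.h>

    /* Mirror of the push_size computation above; shift is 0 for a
       286 gate, 1 for a 386 gate. Returns the frame size in bytes. */
    static unsigned int frame_size(int shift, int new_stack,
                                   int has_error_code, int vm86)
    {
        unsigned int push_size = 6 + (new_stack << 2) + (has_error_code << 1);
        if (vm86)
            push_size += 8;   /* GS, FS, DS, ES selectors */
        return push_size << shift;
    }

    int main(void)
    {
        printf("%u\n", frame_size(1, 0, 0, 0)); /* 12: EFLAGS, CS, EIP */
        printf("%u\n", frame_size(1, 1, 1, 0)); /* 24: + SS, ESP, error code */
        printf("%u\n", frame_size(0, 0, 1, 0)); /*  8: 16-bit gate + error code */
        return 0;
    }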
@@ -196,14 +422,39 @@
        code */
     if (is_int)
         EIP = next_eip;
+}
+
+/*
+ * Begin execution of an interrupt. is_int is TRUE if coming from
+ * the int instruction. next_eip is the EIP value AFTER the interrupt
+ * instruction. It is only relevant if is_int is TRUE.
+ */
+void do_interrupt(int intno, int is_int, int error_code, 
+                  unsigned int next_eip)
+{
+    if (env->cr[0] & CR0_PE_MASK) {
+        do_interrupt_protected(intno, is_int, error_code, next_eip);
+    } else {
+        do_interrupt_real(intno, is_int, error_code, next_eip);
+    }
+}
+
+/*
+ * Signal an interrupt. It is executed in the main CPU loop.
+ * is_int is TRUE if coming from the int instruction. next_eip is the
+ * EIP value AFTER the interrupt instruction. It is only relevant if
+ * is_int is TRUE.
+ */
+void raise_interrupt(int intno, int is_int, int error_code, 
+                     unsigned int next_eip)
+{
     env->exception_index = intno;
     env->error_code = error_code;
-
+    env->exception_is_int = is_int;
+    env->exception_next_eip = next_eip;
     cpu_loop_exit();
 }
 
-#endif
-
 /* shortcuts to generate exceptions */
 void raise_exception_err(int exception_index, int error_code)
 {
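The do_interrupt_real() path added above fetches the handler from the real-mode interrupt vector table, where each vector is four little-endian bytes (16-bit offset, then 16-bit segment), hence the intno * 4 + 3 limit check. A self-contained illustration with a made-up vector value:

    #include <stdio.h>

    int main(void)
    {
        unsigned char ivt[1024] = { 0 };  /* 256 vectors x 4 bytes */
        int intno = 0x10;
        unsigned int offset, selector;

        /* pretend the vector points at f000:1234 */
        ivt[intno * 4 + 0] = 0x34;   /* offset, low byte   */
        ivt[intno * 4 + 1] = 0x12;   /* offset, high byte  */
        ivt[intno * 4 + 2] = 0x00;   /* segment, low byte  */
        ivt[intno * 4 + 3] = 0xf0;   /* segment, high byte */

        /* same reads as lduw(ptr) and lduw(ptr + 2) above */
        offset   = ivt[intno * 4] | (ivt[intno * 4 + 1] << 8);
        selector = ivt[intno * 4 + 2] | (ivt[intno * 4 + 3] << 8);
        printf("int 0x%02x -> %04x:%04x\n", intno, selector, offset);
        return 0;
    }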
@@ -335,9 +586,9 @@
 {
     sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
     sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
-    if (e2 & (1 << 23))
+    if (e2 & DESC_G_MASK)
         sc->limit = (sc->limit << 12) | 0xfff;
-    sc->seg_32bit = (e2 >> 22) & 1;
+    sc->flags = e2;
 }
 
 void helper_lldt_T0(void)
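load_seg_cache() reassembles the base and limit that the descriptor format scatters across the two 32-bit words e1 and e2, scaling the limit by 4K pages when the granularity bit (DESC_G_MASK, bit 23 of e2) is set. The same decoding as a standalone sketch, with an invented descriptor:

    #include <stdio.h>

    int main(void)
    {
        /* made-up descriptor: base 0x00120000, limit 0xfffff, G = 1 */
        unsigned int e1 = 0x0000ffff, e2 = 0x00cf9a12;
        unsigned int base, limit;

        base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
        limit = (e1 & 0xffff) | (e2 & 0x000f0000);
        if (e2 & (1 << 23))               /* DESC_G_MASK: 4K granularity */
            limit = (limit << 12) | 0xfff;
        printf("base=0x%08x limit=0x%08x\n", base, limit);
        return 0;
    }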
@@ -382,9 +633,10 @@
 
     selector = T0 & 0xffff;
     if ((selector & 0xfffc) == 0) {
-        /* XXX: NULL selector case: invalid LDT */
+        /* NULL selector case: invalid LDT */
         env->tr.base = NULL;
         env->tr.limit = 0;
+        env->tr.flags = 0;
     } else {
         if (selector & 0x4)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
412 664
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
413 665
{
414 666
    SegmentCache *sc;
415
    SegmentCache *dt;
416
    int index;
417 667
    uint32_t e1, e2;
418
    uint8_t *ptr;
419 668
    
420 669
    sc = &env->segs[seg_reg];
421 670
    if ((selector & 0xfffc) == 0) {
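load_seg() and the new load_segment() helper decode selectors the same way: bits 0-1 are the RPL, bit 2 is the table indicator (GDT vs LDT), and the remaining bits index 8-byte descriptors, so (selector & 0xfffc) == 0 catches a null selector at any RPL. A small sketch with an arbitrary example value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int selector = 0x2b;     /* arbitrary example */

        printf("rpl=%u table=%s index=%u offset=0x%x null=%s\n",
               selector & 3,                      /* requested privilege */
               (selector & 0x4) ? "LDT" : "GDT",  /* table indicator */
               selector >> 3,                     /* descriptor index */
               selector & ~7u,                    /* byte offset, as in load_segment() */
               ((selector & 0xfffc) == 0) ? "yes" : "no");
        return 0;
    }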
@@ -427,21 +676,13 @@
             /* XXX: each access should trigger an exception */
             sc->base = NULL;
             sc->limit = 0;
-            sc->seg_32bit = 1;
+            sc->flags = 0;
         }
     } else {
-        if (selector & 0x4)
-            dt = &env->ldt;
-        else
-            dt = &env->gdt;
-        index = selector & ~7;
-        if ((index + 7) > dt->limit) {
+        if (load_segment(&e1, &e2, selector) != 0) {
             EIP = cur_eip;
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         }
-        ptr = dt->base + index;
-        e1 = ldl(ptr);
-        e2 = ldl(ptr + 4);
         if (!(e2 & DESC_S_MASK) ||
             (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
             EIP = cur_eip;
@@ -469,8 +710,8 @@
         }
         load_seg_cache(sc, e1, e2);
 #if 0
-        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx seg_32bit=%d\n", 
-                selector, (unsigned long)sc->base, sc->limit, sc->seg_32bit);
+        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 
+                selector, (unsigned long)sc->base, sc->limit, sc->flags);
 #endif
     }
     sc->selector = selector;
@@ -480,25 +721,14 @@
 void jmp_seg(int selector, unsigned int new_eip)
 {
     SegmentCache sc1;
-    SegmentCache *dt;
-    int index;
     uint32_t e1, e2, cpl, dpl, rpl;
-    uint8_t *ptr;
 
     if ((selector & 0xfffc) == 0) {
         raise_exception_err(EXCP0D_GPF, 0);
     }
 
-    if (selector & 0x4)
-      dt = &env->ldt;
-    else
-      dt = &env->gdt;
-    index = selector & ~7;
-    if ((index + 7) > dt->limit)
+    if (load_segment(&e1, &e2, selector) != 0)
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-    ptr = dt->base + index;
-    e1 = ldl(ptr);
-    e2 = ldl(ptr + 4);
     cpl = env->segs[R_CS].selector & 3;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
@@ -530,22 +760,143 @@
     }
 }
 
-/* XXX: do more */
+/* init the segment cache in vm86 mode */
+static inline void load_seg_vm(int seg, int selector)
+{
+    SegmentCache *sc = &env->segs[seg];
+    selector &= 0xffff;
+    sc->base = (uint8_t *)(selector << 4);
+    sc->selector = selector;
+    sc->flags = 0;
+    sc->limit = 0xffff;
+}
+
+/* protected mode iret */
+void helper_iret_protected(int shift)
+{
+    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
+    uint32_t new_es, new_ds, new_fs, new_gs;
+    uint32_t e1, e2;
+    int cpl, dpl, rpl, eflags_mask;
+    uint8_t *ssp;
+
+    sp = env->regs[R_ESP];
+    if (!(env->segs[R_SS].flags & DESC_B_MASK))
+        sp &= 0xffff;
+    ssp = env->segs[R_SS].base + sp;
+    if (shift == 1) {
+        /* 32 bits */
+        new_eflags = ldl(ssp + 8);
+        new_cs = ldl(ssp + 4) & 0xffff;
+        new_eip = ldl(ssp);
+        if (new_eflags & VM_MASK)
+            goto return_to_vm86;
+    } else {
+        /* 16 bits */
+        new_eflags = lduw(ssp + 4);
+        new_cs = lduw(ssp + 2);
+        new_eip = lduw(ssp);
+    }
+    if ((new_cs & 0xfffc) == 0)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    if (load_segment(&e1, &e2, new_cs) != 0)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    if (!(e2 & DESC_S_MASK) ||
+        !(e2 & DESC_CS_MASK))
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    cpl = env->segs[R_CS].selector & 3;
+    rpl = new_cs & 3; 
+    if (rpl < cpl)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    if (e2 & DESC_CS_MASK) {
+        if (dpl > rpl)
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    } else {
+        if (dpl != rpl)
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    }
+    if (!(e2 & DESC_P_MASK))
+        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+
+    if (rpl == cpl) {
+        /* return to same privilege level */
+        load_seg(R_CS, new_cs, env->eip);
+        new_esp = sp + (6 << shift);
+    } else {
+        /* return to different privilege level */
+        if (shift == 1) {
+            /* 32 bits */
+            new_esp = ldl(ssp + 12);
+            new_ss = ldl(ssp + 16) & 0xffff;
+        } else {
+            /* 16 bits */
+            new_esp = lduw(ssp + 6);
+            new_ss = lduw(ssp + 8);
+        }
+
+        if ((new_ss & 3) != rpl)
+            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+        if (load_segment(&e1, &e2, new_ss) != 0)
+            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+        if (!(e2 & DESC_S_MASK) ||
+            (e2 & DESC_CS_MASK) ||
+            !(e2 & DESC_W_MASK))
+            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        if (dpl != rpl)
+            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+        if (!(e2 & DESC_P_MASK))
+            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
+
+        load_seg(R_CS, new_cs, env->eip);
+        load_seg(R_SS, new_ss, env->eip);
+    }
+    if (env->segs[R_SS].flags & DESC_B_MASK)
+        env->regs[R_ESP] = new_esp;
+    else
+        env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | 
+            (new_esp & 0xffff);
+    env->eip = new_eip;
+    if (cpl == 0)
+        eflags_mask = FL_UPDATE_CPL0_MASK;
+    else
+        eflags_mask = FL_UPDATE_MASK32;
+    if (shift == 0)
+        eflags_mask &= 0xffff;
+    load_eflags(new_eflags, eflags_mask);
+    return;
+
+ return_to_vm86:
+    new_esp = ldl(ssp + 12);
+    new_ss = ldl(ssp + 16);
+    new_es = ldl(ssp + 20);
+    new_ds = ldl(ssp + 24);
+    new_fs = ldl(ssp + 28);
+    new_gs = ldl(ssp + 32);
+
+    /* modify processor state */
+    load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
+    load_seg_vm(R_CS, new_cs);
+    load_seg_vm(R_SS, new_ss);
+    load_seg_vm(R_ES, new_es);
+    load_seg_vm(R_DS, new_ds);
+    load_seg_vm(R_FS, new_fs);
+    load_seg_vm(R_GS, new_gs);
+
+    env->eip = new_eip;
+    env->regs[R_ESP] = new_esp;
+}
+
 void helper_movl_crN_T0(int reg)
 {
+    env->cr[reg] = T0;
     switch(reg) {
     case 0:
-    default:
-        env->cr[0] = reg;
-        break;
-    case 2:
-        env->cr[2] = reg;
+        cpu_x86_update_cr0(env);
         break;
     case 3:
-        env->cr[3] = reg;
-        break;
-    case 4:
-        env->cr[4] = reg;
+        cpu_x86_update_cr3(env);
         break;
     }
 }
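helper_iret_protected() reads its return frame at fixed offsets from ssp, matching the ldl() calls above: EIP, CS and EFLAGS always; ESP and SS only on a privilege change; and four more segment selectors when dropping back to vm86. A sketch that just prints the 32-bit frame layout:

    #include <stdio.h>

    int main(void)
    {
        static const char *slot[] = {
            "EIP", "CS", "EFLAGS",            /* always present */
            "ESP", "SS",                      /* only on privilege change */
            "ES", "DS", "FS", "GS"            /* only when returning to vm86 */
        };
        int i;

        for (i = 0; i < 9; i++)
            printf("ssp + %2d: %s\n", i * 4, slot[i]);
        return 0;
    }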
@@ -556,6 +907,11 @@
     env->dr[reg] = T0;
 }
 
+void helper_invlpg(unsigned int addr)
+{
+    cpu_x86_flush_tlb(env, addr);
+}
+
 /* rdtsc */
 #ifndef __i386__
 uint64_t emu_time;
577 933
void helper_lsl(void)
578 934
{
579 935
    unsigned int selector, limit;
580
    SegmentCache *dt;
581
    int index;
582 936
    uint32_t e1, e2;
583
    uint8_t *ptr;
584 937

  
585 938
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
586 939
    selector = T0 & 0xffff;
587
    if (selector & 0x4)
588
        dt = &env->ldt;
589
    else
590
        dt = &env->gdt;
591
    index = selector & ~7;
592
    if ((index + 7) > dt->limit)
940
    if (load_segment(&e1, &e2, selector) != 0)
593 941
        return;
594
    ptr = dt->base + index;
595
    e1 = ldl(ptr);
596
    e2 = ldl(ptr + 4);
597 942
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
598 943
    if (e2 & (1 << 23))
599 944
        limit = (limit << 12) | 0xfff;
......
604 949
void helper_lar(void)
605 950
{
606 951
    unsigned int selector;
607
    SegmentCache *dt;
608
    int index;
609
    uint32_t e2;
610
    uint8_t *ptr;
952
    uint32_t e1, e2;
611 953

  
612 954
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
613 955
    selector = T0 & 0xffff;
614
    if (selector & 0x4)
615
        dt = &env->ldt;
616
    else
617
        dt = &env->gdt;
618
    index = selector & ~7;
619
    if ((index + 7) > dt->limit)
956
    if (load_segment(&e1, &e2, selector) != 0)
620 957
        return;
621
    ptr = dt->base + index;
622
    e2 = ldl(ptr + 4);
623 958
    T1 = e2 & 0x00f0ff00;
624 959
    CC_SRC |= CC_Z;
625 960
}
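The 0x00f0ff00 mask in helper_lar keeps the descriptor's access byte (type, S, DPL, P in bits 8-15 of e2) and flags nibble (bits 20-23), which is the information the LAR instruction reports. A sketch with a made-up descriptor word:

    #include <stdio.h>

    int main(void)
    {
        unsigned int e2 = 0x00cf9a00;     /* made-up descriptor high word */
        unsigned int ar = e2 & 0x00f0ff00;

        printf("access byte=0x%02x flags nibble=0x%x (ar=0x%08x)\n",
               (ar >> 8) & 0xff,          /* type/S/DPL/P */
               (ar >> 20) & 0xf,          /* AVL/L/D-B/G */
               ar);
        return 0;
    }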
