/*
 *  SH4 emulation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "hw/sh_intc.h"

#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUState *env)
{
  env->exception_index = -1;
}

int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
                             int mmu_idx, int is_softmmu)
{
    env->tea = address;
    env->exception_index = -1;
    switch (rw) {
    case 0:
        env->exception_index = 0x0a0;
        break;
    case 1:
        env->exception_index = 0x0c0;
        break;
    case 2:
        env->exception_index = 0x0a0;
        break;
    }
    return 1;
}

int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
{
    /* For user mode, only U0 area is cachable. */
    return !(addr & 0x80000000);
}

#else /* !CONFIG_USER_ONLY */

#define MMU_OK                   0
#define MMU_ITLB_MISS            (-1)
#define MMU_ITLB_MULTIPLE        (-2)
#define MMU_ITLB_VIOLATION       (-3)
#define MMU_DTLB_MISS_READ       (-4)
#define MMU_DTLB_MISS_WRITE      (-5)
#define MMU_DTLB_INITIAL_WRITE   (-6)
#define MMU_DTLB_VIOLATION_READ  (-7)
#define MMU_DTLB_VIOLATION_WRITE (-8)
#define MMU_DTLB_MULTIPLE        (-9)
#define MMU_DTLB_MISS            (-10)
#define MMU_IADDR_ERROR          (-11)
#define MMU_DADDR_ERROR_READ     (-12)
#define MMU_DADDR_ERROR_WRITE    (-13)

void do_interrupt(CPUState * env)
{
    int do_irq = env->interrupt_request & CPU_INTERRUPT_HARD;
    int do_exp, irq_vector = env->exception_index;

    /* prioritize exceptions over interrupts */

    do_exp = env->exception_index != -1;
    do_irq = do_irq && (env->exception_index == -1);

    if (env->sr & SR_BL) {
        if (do_exp && env->exception_index != 0x1e0) {
            env->exception_index = 0x000; /* masked exception -> reset */
        }
        if (do_irq && !env->intr_at_halt) {
            return; /* masked */
        }
        env->intr_at_halt = 0;
    }

    if (do_irq) {
        irq_vector = sh_intc_get_pending_vector(env->intc_handle,
                                                (env->sr >> 4) & 0xf);
        if (irq_vector == -1) {
            return; /* masked */
        }
    }

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *expname;
        switch (env->exception_index) {
        case 0x0e0:
            expname = "addr_error";
            break;
        case 0x040:
            expname = "tlb_miss";
            break;
        case 0x0a0:
            expname = "tlb_violation";
            break;
        case 0x180:
            expname = "illegal_instruction";
            break;
        case 0x1a0:
            expname = "slot_illegal_instruction";
            break;
        case 0x800:
            expname = "fpu_disable";
            break;
        case 0x820:
            expname = "slot_fpu";
            break;
        case 0x100:
            expname = "data_write";
            break;
        case 0x060:
            expname = "dtlb_miss_write";
            break;
        case 0x0c0:
            expname = "dtlb_violation_write";
            break;
        case 0x120:
            expname = "fpu_exception";
            break;
        case 0x080:
            expname = "initial_page_write";
            break;
        case 0x160:
            expname = "trapa";
            break;
        default:
            expname = do_irq ? "interrupt" : "???";
            break;
        }
        qemu_log("exception 0x%03x [%s] raised\n",
                  irq_vector, expname);
        log_cpu_state(env, 0);
    }

    env->ssr = env->sr;
    env->spc = env->pc;
    env->sgr = env->gregs[15];
    env->sr |= SR_BL | SR_MD | SR_RB;

    if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        /* Branch instruction should be executed again before delay slot. */
        env->spc -= 2;
        /* Clear flags for exception/interrupt routine. */
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL | DELAY_SLOT_TRUE);
    }
    if (env->flags & DELAY_SLOT_CLEARME)
        env->flags = 0;

    if (do_exp) {
        env->expevt = env->exception_index;
        switch (env->exception_index) {
        case 0x000:
        case 0x020:
        case 0x140:
            env->sr &= ~SR_FD;
            env->sr |= 0xf << 4; /* IMASK */
            env->pc = 0xa0000000;
            break;
        case 0x040:
        case 0x060:
            env->pc = env->vbr + 0x400;
            break;
        case 0x160:
            env->spc += 2; /* special case for TRAPA */
            /* fall through */
        default:
            env->pc = env->vbr + 0x100;
            break;
        }
        return;
    }

    if (do_irq) {
        env->intevt = irq_vector;
        env->pc = env->vbr + 0x600;
        return;
    }
}
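
/*
 * Exception entry summary (as implemented above): SSR/SPC/SGR save the
 * pre-exception state, SR.BL/MD/RB are set, and control transfers to
 *   0xa0000000   for reset-class events (0x000, 0x020, 0x140),
 *   VBR + 0x400  for TLB miss exceptions (0x040, 0x060),
 *   VBR + 0x100  for all other general exceptions (including TRAPA),
 *   VBR + 0x600  for external interrupts.
 */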

static void update_itlb_use(CPUState * env, int itlbnb)
{
    uint8_t or_mask = 0, and_mask = (uint8_t) - 1;

    switch (itlbnb) {
    case 0:
        and_mask = 0x1f;
        break;
    case 1:
        and_mask = 0xe7;
        or_mask = 0x80;
        break;
    case 2:
        and_mask = 0xfb;
        or_mask = 0x50;
        break;
    case 3:
        or_mask = 0x2c;
        break;
    }

    env->mmucr &= (and_mask << 24) | 0x00ffffff;
    env->mmucr |= (or_mask << 24);
}

static int itlb_replacement(CPUState * env)
{
    if ((env->mmucr & 0xe0000000) == 0xe0000000)
        return 0;
    if ((env->mmucr & 0x98000000) == 0x18000000)
        return 1;
    if ((env->mmucr & 0x54000000) == 0x04000000)
        return 2;
    if ((env->mmucr & 0x2c000000) == 0x00000000)
        return 3;
    cpu_abort(env, "Unhandled itlb_replacement");
}
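
/*
 * The masks above manipulate MMUCR.LRUI (bits 31:26), the pseudo-LRU state
 * of the four ITLB entries.  Decoding the constants, an access to entry N
 * forces the pattern on the left, and an entry is chosen as the replacement
 * victim when the pattern on the right holds:
 *   entry 0 used -> LRUI = 000xxx   victim 0 when LRUI = 111xxx
 *   entry 1 used -> LRUI = 1xx00x   victim 1 when LRUI = 0xx11x
 *   entry 2 used -> LRUI = x1x1x0   victim 2 when LRUI = x0x0x1
 *   entry 3 used -> LRUI = xx1x11   victim 3 when LRUI = xx0x00
 */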

/* Find the corresponding entry in the right TLB
   Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE
*/
static int find_tlb_entry(CPUState * env, target_ulong address,
                          tlb_t * entries, uint8_t nbtlb, int use_asid)
{
    int match = MMU_DTLB_MISS;
    uint32_t start, end;
    uint8_t asid;
    int i;

    asid = env->pteh & 0xff;

    for (i = 0; i < nbtlb; i++) {
        if (!entries[i].v)
            continue;                /* Invalid entry */
        if (!entries[i].sh && use_asid && entries[i].asid != asid)
            continue;                /* Bad ASID */
        start = (entries[i].vpn << 10) & ~(entries[i].size - 1);
        end = start + entries[i].size - 1;
        if (address >= start && address <= end) {        /* Match */
            if (match != MMU_DTLB_MISS)
                return MMU_DTLB_MULTIPLE;        /* Multiple match */
            match = i;
        }
    }
    return match;
}
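
/*
 * Worked example with hypothetical values: a valid 4 KiB entry with
 * vpn = 0x12345 covers start = (0x12345 << 10) & ~0xfff = 0x048d1000
 * through end = 0x048d1fff, so any address in that page selects the entry,
 * provided the entry is shared (SH set) or its ASID equals PTEH.ASID when
 * ASIDs are in use.
 */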

static void increment_urc(CPUState * env)
{
    uint8_t urb, urc;

    /* Increment URC */
    urb = ((env->mmucr) >> 18) & 0x3f;
    urc = ((env->mmucr) >> 10) & 0x3f;
    urc++;
    if ((urb > 0 && urc > urb) || urc > (UTLB_SIZE - 1))
        urc = 0;
    env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);
}
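
/*
 * Example of the wrap rule above: with MMUCR.URB = 0x10 the counter cycles
 * through entries 0..0x10 and resets to 0 once it would exceed URB; with
 * URB = 0 it sweeps the whole UTLB, wrapping after UTLB_SIZE - 1.
 */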

/* Find itlb entry - update itlb from utlb if necessary and asked for
   Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE
   Update the itlb from utlb if update is not 0
*/
static int find_itlb_entry(CPUState * env, target_ulong address,
                           int use_asid, int update)
{
    int e, n;

    e = find_tlb_entry(env, address, env->itlb, ITLB_SIZE, use_asid);
    if (e == MMU_DTLB_MULTIPLE)
        e = MMU_ITLB_MULTIPLE;
    else if (e == MMU_DTLB_MISS && update) {
        e = find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
        if (e >= 0) {
            tlb_t * ientry;
            n = itlb_replacement(env);
            ientry = &env->itlb[n];
            if (ientry->v) {
                tlb_flush_page(env, ientry->vpn << 10);
            }
            *ientry = env->utlb[e];
            e = n;
        } else if (e == MMU_DTLB_MISS)
            e = MMU_ITLB_MISS;
    } else if (e == MMU_DTLB_MISS)
        e = MMU_ITLB_MISS;
    if (e >= 0)
        update_itlb_use(env, e);
    return e;
}

/* Find utlb entry
   Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
static int find_utlb_entry(CPUState * env, target_ulong address, int use_asid)
{
    /* per utlb access */
    increment_urc(env);

    /* Return entry */
    return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
}

/* Match address against MMU
   Return MMU_OK, MMU_DTLB_MISS_READ, MMU_DTLB_MISS_WRITE,
   MMU_DTLB_INITIAL_WRITE, MMU_DTLB_VIOLATION_READ,
   MMU_DTLB_VIOLATION_WRITE, MMU_ITLB_MISS,
   MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
   MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
static int get_mmu_address(CPUState * env, target_ulong * physical,
                           int *prot, target_ulong address,
                           int rw, int access_type)
{
    int use_asid, n;
    tlb_t *matching = NULL;

    use_asid = (env->mmucr & MMUCR_SV) == 0 || (env->sr & SR_MD) == 0;

    if (rw == 2) {
        n = find_itlb_entry(env, address, use_asid, 1);
        if (n >= 0) {
            matching = &env->itlb[n];
            if (!(env->sr & SR_MD) && !(matching->pr & 2))
                n = MMU_ITLB_VIOLATION;
            else
                *prot = PAGE_EXEC;
        }
    } else {
        n = find_utlb_entry(env, address, use_asid);
        if (n >= 0) {
            matching = &env->utlb[n];
            if (!(env->sr & SR_MD) && !(matching->pr & 2)) {
                n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
                    MMU_DTLB_VIOLATION_READ;
            } else if ((rw == 1) && !(matching->pr & 1)) {
                n = MMU_DTLB_VIOLATION_WRITE;
            } else if ((rw == 1) && !matching->d) {
                n = MMU_DTLB_INITIAL_WRITE;
            } else {
                *prot = PAGE_READ;
                if ((matching->pr & 1) && matching->d) {
                    *prot |= PAGE_WRITE;
                }
            }
        } else if (n == MMU_DTLB_MISS) {
            n = (rw == 1) ? MMU_DTLB_MISS_WRITE :
                MMU_DTLB_MISS_READ;
        }
    }
    if (n >= 0) {
        n = MMU_OK;
        *physical = ((matching->ppn << 10) & ~(matching->size - 1)) |
            (address & (matching->size - 1));
    }
    return n;
}
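
/*
 * The protection checks above follow the SH-4 PR/D encoding: PR bit 1
 * grants user-mode access, PR bit 0 grants write access, and a store to a
 * page whose D (dirty) bit is clear raises the initial-page-write
 * exception so the OS can mark the page dirty before the store is retried.
 */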

static int get_physical_address(CPUState * env, target_ulong * physical,
                                int *prot, target_ulong address,
                                int rw, int access_type)
{
    /* P1, P2 and P4 areas do not use translation */
    if ((address >= 0x80000000 && address < 0xc0000000) ||
        address >= 0xe0000000) {
        if (!(env->sr & SR_MD)
            && (address < 0xe0000000 || address >= 0xe4000000)) {
            /* Unauthorized access in user mode (only store queues are available) */
            fprintf(stderr, "Unauthorized access\n");
            if (rw == 0)
                return MMU_DADDR_ERROR_READ;
            else if (rw == 1)
                return MMU_DADDR_ERROR_WRITE;
            else
                return MMU_IADDR_ERROR;
        }
        if (address >= 0x80000000 && address < 0xc0000000) {
            /* Mask upper 3 bits for P1 and P2 areas */
            *physical = address & 0x1fffffff;
        } else {
            *physical = address;
        }
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return MMU_OK;
    }

    /* If MMU is disabled, return the corresponding physical page */
    if (!(env->mmucr & MMUCR_AT)) {
        *physical = address & 0x1FFFFFFF;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return MMU_OK;
    }

    /* We need to resort to the MMU */
    return get_mmu_address(env, physical, prot, address, rw, access_type);
}
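
/*
 * SH-4 address map as handled above:
 *   P0/U0  0x00000000-0x7fffffff  translated when MMUCR.AT is set
 *   P1     0x80000000-0x9fffffff  untranslated, physical = addr & 0x1fffffff
 *   P2     0xa0000000-0xbfffffff  untranslated, physical = addr & 0x1fffffff
 *   P3     0xc0000000-0xdfffffff  translated when MMUCR.AT is set
 *   P4     0xe0000000-0xffffffff  control space, passed through unmodified;
 *          only the store queue window 0xe0000000-0xe3ffffff is reachable
 *          from user mode.
 */
#if 0
/* Illustrative sketch (not built): a P1 lookup never consults the TLB.
   The address and helper name are only examples. */
static target_ulong example_p1_lookup(CPUState *env)
{
    target_ulong physical;
    int prot;

    /* 0x8c000000 is a typical P1 kernel address; it yields 0x0c000000. */
    get_physical_address(env, &physical, &prot, 0x8c000000, 0, ACCESS_INT);
    return physical;
}
#endif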

int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
                             int mmu_idx, int is_softmmu)
{
    target_ulong physical;
    int prot, ret, access_type;

    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw,
                               access_type);

    if (ret != MMU_OK) {
        env->tea = address;
        switch (ret) {
        case MMU_ITLB_MISS:
        case MMU_DTLB_MISS_READ:
            env->exception_index = 0x040;
            break;
        case MMU_DTLB_MULTIPLE:
        case MMU_ITLB_MULTIPLE:
            env->exception_index = 0x140;
            break;
        case MMU_ITLB_VIOLATION:
            env->exception_index = 0x0a0;
            break;
        case MMU_DTLB_MISS_WRITE:
            env->exception_index = 0x060;
            break;
        case MMU_DTLB_INITIAL_WRITE:
            env->exception_index = 0x080;
            break;
        case MMU_DTLB_VIOLATION_READ:
            env->exception_index = 0x0a0;
            break;
        case MMU_DTLB_VIOLATION_WRITE:
            env->exception_index = 0x0c0;
            break;
        case MMU_IADDR_ERROR:
        case MMU_DADDR_ERROR_READ:
            env->exception_index = 0x0e0;
            break;
        case MMU_DADDR_ERROR_WRITE:
            env->exception_index = 0x100;
            break;
        default:
            cpu_abort(env, "Unhandled MMU fault");
        }
        return 1;
    }

    address &= TARGET_PAGE_MASK;
    physical &= TARGET_PAGE_MASK;

    tlb_set_page(env, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
{
    target_ulong physical;
    int prot;

    get_physical_address(env, &physical, &prot, addr, 0, 0);
    return physical;
}

void cpu_load_tlb(CPUSH4State * env)
{
    int n = cpu_mmucr_urc(env->mmucr);
    tlb_t * entry = &env->utlb[n];

    if (entry->v) {
        /* Overwriting valid entry in utlb. */
        target_ulong address = entry->vpn << 10;
        tlb_flush_page(env, address);
    }

    /* Copy the page-table registers (PTEH/PTEL/PTEA) into the UTLB entry. */
    entry->asid = (uint8_t)cpu_pteh_asid(env->pteh);
    entry->vpn  = cpu_pteh_vpn(env->pteh);
    entry->v    = (uint8_t)cpu_ptel_v(env->ptel);
    entry->ppn  = cpu_ptel_ppn(env->ptel);
    entry->sz   = (uint8_t)cpu_ptel_sz(env->ptel);
    switch (entry->sz) {
    case 0: /* 00 */
        entry->size = 1024; /* 1K */
        break;
    case 1: /* 01 */
        entry->size = 1024 * 4; /* 4K */
        break;
    case 2: /* 10 */
        entry->size = 1024 * 64; /* 64K */
        break;
    case 3: /* 11 */
        entry->size = 1024 * 1024; /* 1M */
        break;
    default:
        cpu_abort(env, "Unhandled load_tlb");
        break;
    }
    entry->sh   = (uint8_t)cpu_ptel_sh(env->ptel);
    entry->c    = (uint8_t)cpu_ptel_c(env->ptel);
    entry->pr   = (uint8_t)cpu_ptel_pr(env->ptel);
    entry->d    = (uint8_t)cpu_ptel_d(env->ptel);
    entry->wt   = (uint8_t)cpu_ptel_wt(env->ptel);
    entry->sa   = (uint8_t)cpu_ptea_sa(env->ptea);
    entry->tc   = (uint8_t)cpu_ptea_tc(env->ptea);
}
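
/*
 * This mirrors the LDTLB instruction: a guest TLB-miss handler typically
 * writes the faulting page's translation into PTEH and PTEL (plus PTEA for
 * the PCMCIA attributes) and then executes LDTLB, which copies those
 * registers into the UTLB entry selected by MMUCR.URC, i.e. the copy
 * performed here.
 */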

void cpu_sh4_invalidate_tlb(CPUSH4State *s)
{
    int i;

    /* UTLB */
    for (i = 0; i < UTLB_SIZE; i++) {
        tlb_t * entry = &s->utlb[i];
        entry->v = 0;
    }
    /* ITLB */
    for (i = 0; i < ITLB_SIZE; i++) {
        tlb_t * entry = &s->itlb[i];
        entry->v = 0;
    }

    tlb_flush(s, 1);
}

void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, target_phys_addr_t addr,
                                    uint32_t mem_value)
{
    int associate = addr & 0x00000080;
    uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
    uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9);
    uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
    uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
    int use_asid = (s->mmucr & MMUCR_SV) == 0 || (s->sr & SR_MD) == 0;

    if (associate) {
        int i;
        tlb_t * utlb_match_entry = NULL;
        int needs_tlb_flush = 0;

        /* search UTLB */
        for (i = 0; i < UTLB_SIZE; i++) {
            tlb_t * entry = &s->utlb[i];
            if (!entry->v)
                continue;

            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (utlb_match_entry) {
                    /* Multiple TLB Exception */
                    s->exception_index = 0x140;
                    s->tea = addr;
                    break;
                }
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                entry->v = v;
                entry->d = d;
                utlb_match_entry = entry;
            }
            increment_urc(s); /* per utlb access */
        }

        /* search ITLB */
        for (i = 0; i < ITLB_SIZE; i++) {
            tlb_t * entry = &s->itlb[i];
            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                if (utlb_match_entry)
                    *entry = *utlb_match_entry;
                else
                    entry->v = v;
                break;
            }
        }

        if (needs_tlb_flush)
            tlb_flush_page(s, vpn << 10);

    } else {
        int index = (addr & 0x00003f00) >> 8;
        tlb_t * entry = &s->utlb[index];
        if (entry->v) {
            /* Overwriting valid entry in utlb. */
            target_ulong address = entry->vpn << 10;
            tlb_flush_page(s, address);
        }
        entry->asid = asid;
        entry->vpn = vpn;
        entry->d = d;
        entry->v = v;
        increment_urc(s);
    }
}
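
/*
 * Decoding used above for the memory-mapped UTLB address array (P4 area,
 * base 0xf6000000):
 *   address: bit 7 selects an associative write, bits 13:8 index the entry
 *   data:    bits 31:10 VPN, bit 9 D, bit 8 V, bits 7:0 ASID
 * An associative write looks up both TLBs by VPN/ASID and updates the
 * matching entries' V (and D) bits; a non-associative write replaces the
 * VPN/D/V/ASID fields of the indexed UTLB entry directly.
 */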

int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
{
    int n;
    int use_asid = (env->mmucr & MMUCR_SV) == 0 || (env->sr & SR_MD) == 0;

    /* check area */
    if (env->sr & SR_MD) {
        /* For privileged mode, the P2 and P4 areas are not cacheable. */
        if ((0xA0000000 <= addr && addr < 0xC0000000) || 0xE0000000 <= addr)
            return 0;
    } else {
        /* For user mode, only the U0 area is cacheable. */
        if (0x80000000 <= addr)
            return 0;
    }

    /*
     * TODO : Evaluate CCR and check if the cache is on or off.
     *        Now CCR is not in CPUSH4State, but in SH7750State.
     *        When you move the ccr into CPUSH4State, the code will be
     *        as follows.
     */
#if 0
    /* check if operand cache is enabled or not. */
    if (!(env->ccr & 1))
        return 0;
#endif

    /* if MMU is off, no check for TLB. */
    if (!(env->mmucr & MMUCR_AT))
        return 1;

    /* check TLB */
    n = find_tlb_entry(env, addr, env->itlb, ITLB_SIZE, use_asid);
    if (n >= 0)
        return env->itlb[n].c;

    n = find_tlb_entry(env, addr, env->utlb, UTLB_SIZE, use_asid);
    if (n >= 0)
        return env->utlb[n].c;

    return 0;
}

#endif