Revision cee87be8

--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -1147,40 +1147,15 @@
     argreg = tcg_out_arg_reg32(s, argreg, arghi);
     return argreg;
 }
-#endif /* SOFTMMU */
 
 #define TLB_SHIFT	(CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
 
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
-{
-    int addr_reg, data_reg, data_reg2, bswap;
-#ifdef CONFIG_SOFTMMU
-    int mem_index, s_bits, tlb_offset;
-    TCGReg argreg;
-# if TARGET_LONG_BITS == 64
-    int addr_reg2;
-# endif
-    uint32_t *label_ptr;
-#endif
-
-#ifdef TARGET_WORDS_BIGENDIAN
-    bswap = 1;
-#else
-    bswap = 0;
-#endif
-    data_reg = *args++;
-    if (opc == 3)
-        data_reg2 = *args++;
-    else
-        data_reg2 = 0; /* suppress warning */
-    addr_reg = *args++;
-#ifdef CONFIG_SOFTMMU
-# if TARGET_LONG_BITS == 64
-    addr_reg2 = *args++;
-# endif
-    mem_index = *args;
-    s_bits = opc & 3;
+/* Load and compare a TLB entry, leaving the flags set.  Leaves R0 pointing
+   to the tlb entry.  Clobbers R1 and TMP.  */
 
+static void tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
+                             int s_bits, int tlb_offset)
+{
     /* Should generate something like the following:
      *  shr r8, addr_reg, #TARGET_PAGE_BITS
      *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
@@ -1190,13 +1165,13 @@
 #   error
 #  endif
     tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
-                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+                    0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
     tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                     TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
     tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                     TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
+
     /* We assume that the offset is contained within 20 bits.  */
-    tlb_offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
     assert((tlb_offset & ~0xfffff) == 0);
     if (tlb_offset > 0xfff) {
         tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
@@ -1206,16 +1181,48 @@
     tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
     tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                     TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+
     /* Check alignment.  */
-    if (s_bits)
+    if (s_bits) {
         tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
-                        0, addr_reg, (1 << s_bits) - 1);
-#  if TARGET_LONG_BITS == 64
-    /* XXX: possibly we could use a block data load in the first access.  */
-    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
-    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
-                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
-#  endif
+                        0, addrlo, (1 << s_bits) - 1);
+    }
+
+    if (TARGET_LONG_BITS == 64) {
+        /* XXX: possibly we could use a block data load in the first access. */
+        tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
+        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
+                        TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0));
+    }
+}
+#endif /* SOFTMMU */
+
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+{
+    TCGReg addr_reg, data_reg, data_reg2;
+    bool bswap;
+#ifdef CONFIG_SOFTMMU
+    int mem_index, s_bits;
+    TCGReg argreg, addr_reg2;
+    uint32_t *label_ptr;
+#endif
+#ifdef TARGET_WORDS_BIGENDIAN
+    bswap = 1;
+#else
+    bswap = 0;
+#endif
+
+    data_reg = *args++;
+    data_reg2 = (opc == 3 ? *args++ : 0);
+    addr_reg = *args++;
+#ifdef CONFIG_SOFTMMU
+    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
+    mem_index = *args;
+    s_bits = opc & 3;
+
+    tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
+                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_read));
+
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                     offsetof(CPUTLBEntry, addend)
                     - offsetof(CPUTLBEntry, addr_read));
@@ -1271,11 +1278,11 @@
      */
     argreg = TCG_REG_R0;
     argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
-#if TARGET_LONG_BITS == 64
-    argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
-#else
-    argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
-#endif
+    if (TARGET_LONG_BITS == 64) {
+        argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
+    } else {
+        argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
+    }
     argreg = tcg_out_arg_imm32(s, argreg, mem_index);
     tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);
 
@@ -1302,8 +1309,7 @@
 #else /* !CONFIG_SOFTMMU */
     if (GUEST_BASE) {
         uint32_t offset = GUEST_BASE;
-        int i;
-        int rot;
+        int i, rot;
 
         while (offset) {
             i = ctz32(offset) & ~1;
@@ -1362,68 +1368,33 @@
 #endif
 }
 
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
 {
-    int addr_reg, data_reg, data_reg2, bswap;
+    TCGReg addr_reg, data_reg, data_reg2;
+    bool bswap;
 #ifdef CONFIG_SOFTMMU
-    int mem_index, s_bits, tlb_offset;
-    TCGReg argreg;
-# if TARGET_LONG_BITS == 64
-    int addr_reg2;
-# endif
+    int mem_index, s_bits;
+    TCGReg argreg, addr_reg2;
     uint32_t *label_ptr;
 #endif
-
 #ifdef TARGET_WORDS_BIGENDIAN
     bswap = 1;
 #else
     bswap = 0;
 #endif
+
     data_reg = *args++;
-    if (opc == 3)
-        data_reg2 = *args++;
-    else
-        data_reg2 = 0; /* suppress warning */
+    data_reg2 = (opc == 3 ? *args++ : 0);
    addr_reg = *args++;
 #ifdef CONFIG_SOFTMMU
-# if TARGET_LONG_BITS == 64
-    addr_reg2 = *args++;
-# endif
+    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
     mem_index = *args;
     s_bits = opc & 3;
 
-    /* Should generate something like the following:
-     *  shr r8, addr_reg, #TARGET_PAGE_BITS
-     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
-     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
-     */
-    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
-                    TCG_REG_TMP, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
-    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
-                    TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
-    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
-                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
-    /* We assume that the offset is contained within 20 bits.  */
-    tlb_offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
-    assert((tlb_offset & ~0xfffff) == 0);
-    if (tlb_offset > 0xfff) {
-        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
-                        0xa00 | (tlb_offset >> 12));
-        tlb_offset &= 0xfff;
-    }
-    tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
-    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
-                    TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
-    /* Check alignment.  */
-    if (s_bits)
-        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
-                        0, addr_reg, (1 << s_bits) - 1);
-#  if TARGET_LONG_BITS == 64
-    /* XXX: possibly we could use a block data load in the first access.  */
-    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
-    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
-                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
-#  endif
+    tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
+                     offsetof(CPUArchState,
+                              tlb_table[mem_index][0].addr_write));
+
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                     offsetof(CPUTLBEntry, addend)
                     - offsetof(CPUTLBEntry, addr_write));
@@ -1472,11 +1443,11 @@
      */
     argreg = TCG_REG_R0;
     argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
-#if TARGET_LONG_BITS == 64
-    argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
-#else
-    argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
-#endif
+    if (TARGET_LONG_BITS == 64) {
+        argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
+    } else {
+        argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
+    }
 
     switch (opc) {
     case 0: