tcg: Add TCG_TARGET_HAS_MEMORY_BSWAP

For now, defined universally as true, since we previously required
backends to implement swapped memory operations. Future patches
may now remove that support where it is onerous.

Backports commit e1dcf3529d0797b25bb49a20e94b62eb93e7276a from qemu
Richard Henderson 2018-12-18 05:56:56 -05:00 committed by Lioncash
parent fdb3d6488e
commit 5c4e852c6e
7 changed files with 124 additions and 2 deletions
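
The contract of the new macro, sketched for orientation (this illustration is mine, not text from the commit; the op names follow the tcg-op.c hunk below): when a backend defines TCG_TARGET_HAS_MEMORY_BSWAP as 1, a byte-swapped guest access reaches it as a single qemu_ld/qemu_st op whose memop still carries MO_BSWAP; when it defines it as 0, tcg-op.c strips MO_BSWAP and emits the swap as a separate op around a plain host-endian access:

    /* memop = MO_UL | MO_BSWAP: a byte-swapped 32-bit guest load */

    /* TCG_TARGET_HAS_MEMORY_BSWAP == 1 (every backend after this commit):
       the backend folds the swap into the memory operation itself. */
    qemu_ld_i32  val, addr, MO_UL|MO_BSWAP

    /* TCG_TARGET_HAS_MEMORY_BSWAP == 0 (permitted from now on):
       plain load, then an explicit swap emitted by tcg_gen_qemu_ld_i32(). */
    qemu_ld_i32  val, addr, MO_UL
    bswap32_i32  val, val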

qemu/tcg/aarch64/tcg-target.h

@@ -137,6 +137,7 @@ typedef enum {
 #define TCG_TARGET_HAS_mul_vec 1
 #define TCG_TARGET_DEFAULT_MO (0)
+#define TCG_TARGET_HAS_MEMORY_BSWAP 1
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {

qemu/tcg/arm/tcg-target.h

@@ -132,6 +132,7 @@ enum {
 };
 #define TCG_TARGET_DEFAULT_MO (0)
+#define TCG_TARGET_HAS_MEMORY_BSWAP 1
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {

qemu/tcg/i386/tcg-target.h

@@ -253,6 +253,8 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
 #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
+#define TCG_TARGET_HAS_MEMORY_BSWAP 1
 #ifdef CONFIG_SOFTMMU
 #define TCG_TARGET_NEED_LDST_LABELS
 #endif

qemu/tcg/mips/tcg-target.h

@@ -203,6 +203,7 @@ extern bool use_mips32r2_instructions;
 #endif
 #define TCG_TARGET_DEFAULT_MO (0)
+#define TCG_TARGET_HAS_MEMORY_BSWAP 1
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {

qemu/tcg/s390/tcg-target.h

@@ -135,6 +135,7 @@ extern uint64_t s390_facilities;
 #define TCG_TARGET_CALL_STACK_OFFSET 160
 #define TCG_TARGET_EXTEND_ARGS 1
+#define TCG_TARGET_HAS_MEMORY_BSWAP 1
 #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)

qemu/tcg/sparc/tcg-target.h

@@ -164,6 +164,7 @@ extern bool use_vis3_instructions;
 #define TCG_AREG0 TCG_REG_I0
 #define TCG_TARGET_DEFAULT_MO (0)
+#define TCG_TARGET_HAS_MEMORY_BSWAP 1
 #ifdef _MSC_VER
 #include <windows.h>

qemu/tcg/tcg-op.c

@@ -2723,29 +2723,82 @@ static void tcg_gen_req_mo(TCGContext *s, TCGBar type)
 void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
 {
+    TCGMemOp orig_memop;
     TCGContext *tcg_ctx = uc->tcg_ctx;
     tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_LD | TCG_MO_ST_LD);
     memop = tcg_canonicalize_memop(memop, 0, 0);
+    orig_memop = memop;
+    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+        memop &= ~MO_BSWAP;
+        /* The bswap primitive requires zero-extended input. */
+        if ((memop & MO_SSIZE) == MO_SW) {
+            memop &= ~MO_SIGN;
+        }
+    }
     gen_ldst_i32(tcg_ctx, INDEX_op_qemu_ld_i32, val, addr, memop, idx);
+    if ((orig_memop ^ memop) & MO_BSWAP) {
+        switch (orig_memop & MO_SIZE) {
+        case MO_16:
+            tcg_gen_bswap16_i32(tcg_ctx, val, val);
+            if (orig_memop & MO_SIGN) {
+                tcg_gen_ext16s_i32(tcg_ctx, val, val);
+            }
+            break;
+        case MO_32:
+            tcg_gen_bswap32_i32(tcg_ctx, val, val);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    }
     check_exit_request(tcg_ctx);
 }

 void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
 {
+    TCGv_i32 swap = NULL;
     TCGContext *tcg_ctx = uc->tcg_ctx;
     tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_ST | TCG_MO_ST_ST);
     memop = tcg_canonicalize_memop(memop, 0, 1);
+    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+        swap = tcg_temp_new_i32(tcg_ctx);
+        switch (memop & MO_SIZE) {
+        case MO_16:
+            tcg_gen_ext16u_i32(tcg_ctx, swap, val);
+            tcg_gen_bswap16_i32(tcg_ctx, swap, swap);
+            break;
+        case MO_32:
+            tcg_gen_bswap32_i32(tcg_ctx, swap, val);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        val = swap;
+        memop &= ~MO_BSWAP;
+    }
     gen_ldst_i32(tcg_ctx, INDEX_op_qemu_st_i32, val, addr, memop, idx);
+    if (swap) {
+        tcg_temp_free_i32(tcg_ctx, swap);
+    }
     check_exit_request(tcg_ctx);
 }

 void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
+    TCGMemOp orig_memop;
-    tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_LD | TCG_MO_ST_LD);
     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_ld_i32(uc, TCGV_LOW(tcg_ctx, val), addr, idx, memop);
         if (memop & MO_SIGN) {
@@ -2758,24 +2811,86 @@ void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg i
         return;
     }
+    tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_LD | TCG_MO_ST_LD);
     memop = tcg_canonicalize_memop(memop, 1, 0);
+    orig_memop = memop;
+    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+        memop &= ~MO_BSWAP;
+        /* The bswap primitive requires zero-extended input. */
+        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
+            memop &= ~MO_SIGN;
+        }
+    }
     gen_ldst_i64(tcg_ctx, INDEX_op_qemu_ld_i64, val, addr, memop, idx);
+    if ((orig_memop ^ memop) & MO_BSWAP) {
+        switch (orig_memop & MO_SIZE) {
+        case MO_16:
+            tcg_gen_bswap16_i64(tcg_ctx, val, val);
+            if (orig_memop & MO_SIGN) {
+                tcg_gen_ext16s_i64(tcg_ctx, val, val);
+            }
+            break;
+        case MO_32:
+            tcg_gen_bswap32_i64(tcg_ctx, val, val);
+            if (orig_memop & MO_SIGN) {
+                tcg_gen_ext32s_i64(tcg_ctx, val, val);
+            }
+            break;
+        case MO_64:
+            tcg_gen_bswap64_i64(tcg_ctx, val, val);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    }
     check_exit_request(tcg_ctx);
 }

 void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
+    TCGv_i64 swap = NULL;
-    tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_ST | TCG_MO_ST_ST);
     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_st_i32(uc, TCGV_LOW(tcg_ctx, val), addr, idx, memop);
         check_exit_request(tcg_ctx);
         return;
     }
+    tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_ST | TCG_MO_ST_ST);
     memop = tcg_canonicalize_memop(memop, 1, 1);
+    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+        swap = tcg_temp_new_i64(tcg_ctx);
+        switch (memop & MO_SIZE) {
+        case MO_16:
+            tcg_gen_ext16u_i64(tcg_ctx, swap, val);
+            tcg_gen_bswap16_i64(tcg_ctx, swap, swap);
+            break;
+        case MO_32:
+            tcg_gen_ext32u_i64(tcg_ctx, swap, val);
+            tcg_gen_bswap32_i64(tcg_ctx, swap, swap);
+            break;
+        case MO_64:
+            tcg_gen_bswap64_i64(tcg_ctx, swap, val);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        val = swap;
+        memop &= ~MO_BSWAP;
+    }
     gen_ldst_i64(tcg_ctx, INDEX_op_qemu_st_i64, val, addr, memop, idx);
+    if (swap) {
+        tcg_temp_free_i64(tcg_ctx, swap);
+    }
     check_exit_request(tcg_ctx);
 }
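
A note on the "bswap primitive requires zero-extended input" comments above: the standalone C program below (my illustration, not part of the commit; bswap16() here stands in for the TCG primitive, which likewise leaves its result undefined when bits above 15 are set) shows why the load expanders drop MO_SIGN before a swapped 16-bit load and redo the sign extension afterwards with tcg_gen_ext16s_*, and why the store expanders zero-extend with tcg_gen_ext16u_* before swapping.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for TCG's bswap16 primitive: like the real op, the
       result is only defined when bits 16..31 of the input are zero. */
    static uint32_t bswap16(uint32_t x)
    {
        return (x >> 8) | ((x & 0xff) << 8);
    }

    int main(void)
    {
        /* Guest halfword 0xff80 read byte-swapped by the host: 0x80ff. */
        uint32_t zext = 0x80ff;                    /* MO_UW load (MO_SIGN stripped) */
        uint32_t sext = (uint32_t)(int16_t)0x80ff; /* what an MO_SW load would give */

        printf("swap of zero-extended input: 0x%08x\n", bswap16(zext)); /* 0x0000ff80, correct */
        printf("swap of sign-extended input: 0x%08x\n", bswap16(sext)); /* 0x00ffff80, garbage */

        /* So the expander loads unsigned, swaps, then redoes the sign
           extension, as tcg_gen_ext16s_i32 does in the code above. */
        printf("after re-sign-extension:     0x%08x\n",
               (uint32_t)(int32_t)(int16_t)bswap16(zext));              /* 0xffffff80 */
        return 0;
    }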