softmmu: add helper function to pass through retaddr
This patch introduces several helpers that pass a return address pointing into the TB. A correct return address allows the guest PC and icount to be restored correctly. These functions should be used when helpers embedded into a TB invoke memory operations.

Backports commit 282dffc8a4bfe8724548cabb8a26698bde0a6e18 from qemu
parent 1722be3e73
commit 6cdaaf9b1b

3 changed files with 73 additions and 15 deletions
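The capture below does not name the three modified files, but the hunks match the cpu_ldst accessor template (cpu_ldst_template.h), softmmu_template.h, and tcg/tcg.h. As a hedged illustration of how the new *_ra accessors are meant to be called, the sketch below shows a hypothetical target helper (helper_my_load is not part of this commit; cpu_ldl_data_ra is one of the names the template generates for MEMSUFFIX == _data, DATA_SIZE == 4) passing GETPC() down so that a TLB miss inside the load can restore the guest PC and icount from the calling TB:

    /* Hypothetical helper called from generated code -- illustration only. */
    #include "cpu.h"
    #include "exec/exec-all.h"   /* GETPC() */
    #include "exec/cpu_ldst.h"   /* provides cpu_ldl_data_ra() after this patch */

    uint32_t helper_my_load(CPUArchState *env, target_ulong addr)
    {
        /* GETPC() captures the host return address, which points back into
         * the translation block that invoked this helper. */
        return cpu_ldl_data_ra(env, addr, GETPC());
    }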
@@ -29,20 +29,24 @@
 #define SUFFIX q
 #define USUFFIX q
 #define DATA_TYPE uint64_t
+#define SHIFT 3
 #elif DATA_SIZE == 4
 #define SUFFIX l
 #define USUFFIX l
 #define DATA_TYPE uint32_t
+#define SHIFT 2
 #elif DATA_SIZE == 2
 #define SUFFIX w
 #define USUFFIX uw
 #define DATA_TYPE uint16_t
 #define DATA_STYPE int16_t
+#define SHIFT 1
 #elif DATA_SIZE == 1
 #define SUFFIX b
 #define USUFFIX ub
 #define DATA_TYPE uint8_t
 #define DATA_STYPE int8_t
+#define SHIFT 0
 #else
 #error unsupported data size
 #endif
@@ -56,27 +60,36 @@
 #ifdef SOFTMMU_CODE_ACCESS
 #define ADDR_READ addr_code
 #define MMUSUFFIX _cmmu
+#define URETSUFFIX SUFFIX
+#define SRETSUFFIX SUFFIX
 #else
 #define ADDR_READ addr_read
 #define MMUSUFFIX _mmu
+#define URETSUFFIX USUFFIX
+#define SRETSUFFIX glue(s, SUFFIX)
 #endif
 
 /* generic load/store macros */
 
 static inline RES_TYPE
-glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+                                                  target_ulong ptr,
+                                                  uintptr_t retaddr)
 {
     int page_index;
     RES_TYPE res;
     target_ulong addr;
     int mmu_idx;
+    TCGMemOpIdx oi;
 
     addr = ptr;
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+        oi = make_memop_idx(SHIFT, mmu_idx);
+        res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
+                                                               oi, retaddr);
     } else {
         uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend);
         res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
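For a concrete reading of the hunk above, take DATA_SIZE == 4 and MEMSUFFIX == _data (one instantiation of this template) outside SOFTMMU_CODE_ACCESS: URETSUFFIX is l and MMUSUFFIX is _mmu, so the new slow path becomes roughly oi = make_memop_idx(2, mmu_idx); res = helper_ret_ldl_mmu(env, addr, oi, retaddr);. make_memop_idx() packs the access-size bits (SHIFT) together with the MMU index into one TCGMemOpIdx, so the ret-style helper receives size, MMU mode and return address in a single call. A self-contained sketch of that packing (the real function lives in tcg/tcg.h and additionally asserts that the index is valid):

    /* Sketch only -- illustrates how make_memop_idx() appears to pack its
     * arguments; not the tree's actual definition. */
    typedef unsigned int memop_idx_sketch_t;

    static inline memop_idx_sketch_t make_memop_idx_sketch(unsigned memop,
                                                           unsigned mmu_idx)
    {
        /* memory-op bits (here just the size, i.e. SHIFT) in the high part,
         * MMU index in the low bits */
        return (memop << 4) | mmu_idx;
    }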
@@ -84,27 +97,42 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     return res;
 }
 
+static inline RES_TYPE
+glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+    return glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
+}
+
 #if DATA_SIZE <= 2
 static inline int
-glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+                                                  target_ulong ptr,
+                                                  uintptr_t retaddr)
 {
     int res, page_index;
     target_ulong addr;
     int mmu_idx;
+    TCGMemOpIdx oi;
 
     addr = ptr;
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
-                               MMUSUFFIX)(env, addr, mmu_idx);
+        oi = make_memop_idx(SHIFT, mmu_idx);
+        res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
+                               MMUSUFFIX)(env, addr, oi, retaddr);
     } else {
         uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend);
         res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
     }
     return res;
 }
+static inline int
+glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+    return glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
+}
 #endif
 
 #ifndef SOFTMMU_CODE_ACCESS
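The original cpu_ld*/cpu_lds* entry points survive as thin wrappers that forward a retaddr of 0, so existing callers keep the old behaviour: a zero return address means the access is not attributed to any TB, and no guest-state restore is attempted when it faults. An illustrative expansion (again assuming DATA_SIZE == 2 and MEMSUFFIX == _data; RES_TYPE is uint32_t for the sub-64-bit sizes in this template):

    /* Illustrative expansion of the wrapper added above -- not literal
     * preprocessor output. */
    static inline uint32_t cpu_lduw_data(CPUArchState *env, target_ulong ptr)
    {
        /* retaddr == 0: behave exactly like the pre-patch cpu_lduw_data */
        return cpu_lduw_data_ra(env, ptr, 0);
    }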
@@ -112,26 +140,35 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
 /* generic store macro */
 
 static inline void
-glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
-                                      RES_TYPE v)
+glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+                                                 target_ulong ptr,
+                                                 RES_TYPE v, uintptr_t retaddr)
 {
     int page_index;
     target_ulong addr;
     int mmu_idx;
+    TCGMemOpIdx oi;
 
     addr = ptr;
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+        oi = make_memop_idx(SHIFT, mmu_idx);
+        glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
+                                                     retaddr);
     } else {
         uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend);
         glue(glue(st, SUFFIX), _raw)(hostaddr, v);
     }
 }
 
 
+static inline void
+glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
+                                      RES_TYPE v)
+{
+    glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(env, ptr, v, 0);
+}
+
 #if DATA_SIZE == 8
 static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
@@ -191,3 +228,7 @@ static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
 #undef DATA_SIZE
 #undef MMUSUFFIX
 #undef ADDR_READ
+#undef URETSUFFIX
+#undef SRETSUFFIX
+#undef SHIFT
+
@@ -180,9 +180,6 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
 }
 #endif
 
-#ifdef SOFTMMU_CODE_ACCESS
-static QEMU_UNUSED_FUNC
-#endif
 WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
@@ -408,9 +405,6 @@ _out:
 }
 
 #if DATA_SIZE > 1
-#ifdef SOFTMMU_CODE_ACCESS
-static QEMU_UNUSED_FUNC
-#endif
 WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
@@ -1118,25 +1118,48 @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
 
+uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+
 /* Temporary aliases until backends are converted. */
 #ifdef TARGET_WORDS_BIGENDIAN
 # define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
 # define helper_ret_lduw_mmu  helper_be_lduw_mmu
 # define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
 # define helper_ret_ldul_mmu  helper_be_ldul_mmu
 # define helper_ret_ldl_mmu   helper_be_ldul_mmu
 # define helper_ret_ldq_mmu   helper_be_ldq_mmu
 # define helper_ret_stw_mmu   helper_be_stw_mmu
 # define helper_ret_stl_mmu   helper_be_stl_mmu
 # define helper_ret_stq_mmu   helper_be_stq_mmu
+# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
+# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
+# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
 #else
 # define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
 # define helper_ret_lduw_mmu  helper_le_lduw_mmu
 # define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
 # define helper_ret_ldul_mmu  helper_le_ldul_mmu
 # define helper_ret_ldl_mmu   helper_le_ldul_mmu
 # define helper_ret_ldq_mmu   helper_le_ldq_mmu
 # define helper_ret_stw_mmu   helper_le_stw_mmu
 # define helper_ret_stl_mmu   helper_le_stl_mmu
 # define helper_ret_stq_mmu   helper_le_stq_mmu
+# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
+# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
+# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
 #endif
 
 #endif /* CONFIG_SOFTMMU */
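The tcg.h hunk declares the code-access (_cmmu) load helpers and gives them endian-neutral helper_ret_*_cmmu aliases, mirroring the existing _mmu aliases. That is what the template's code-access instantiation resolves to: with SOFTMMU_CODE_ACCESS defined, MMUSUFFIX is _cmmu and URETSUFFIX/SRETSUFFIX fall back to SUFFIX, so the slow path of e.g. cpu_ldl_code_ra() goes through these aliases. It also appears to be why the two softmmu_template.h hunks above drop the conditional static QEMU_UNUSED_FUNC from helper_le_ld_name/helper_be_ld_name: the _cmmu helpers now need external linkage, since they are declared here and called from the new code-access loads. An illustrative resolution, assuming a little-endian target:

    /* Illustrative name resolution for DATA_SIZE == 4 with
     * SOFTMMU_CODE_ACCESS defined (URETSUFFIX == SUFFIX == l,
     * MMUSUFFIX == _cmmu), little-endian target:
     *
     *   glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr, oi, retaddr)
     *     -> helper_ret_ldl_cmmu(env, addr, oi, retaddr)
     *     -> helper_le_ldl_cmmu(env, addr, oi, retaddr)   (via the alias above)
     */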