Fix masking of PC lower bits when doing exception returns

In commit 9b6a3ea7a699594 store_reg() was changed to mask
both bits 0 and 1 of the new PC value when in ARM mode.
Unfortunately this broke the exception return code paths
when doing a return from ARM mode to Thumb mode: in some
of these we write a new CPSR including new Thumb mode
bit via gen_helper_cpsr_write_eret(), and then use store_reg()
to write the new PC. In this case if the new CPSR specified
Thumb mode then masking bit 1 of the PC is incorrect
(these code paths correspond to the v8 ARM ARM pseudocode
function AArch32.ExceptionReturn(), which always aligns the
new PC appropriately for the new instruction set state).

Instead of using store_reg() in exception-return code paths,
call a new store_pc_exc_ret() which stores the raw new PC
value to env->regs[15], and then mask it appropriately in
the subsequent helper_cpsr_write_eret() where the new
env->thumb state is available.

This fixes a bug introduced by 9b6a3ea7a699594 which caused
crashes/hangs or otherwise bad behaviour for Linux when
userspace was using Thumb.

Backports commit fb0e8e79a9d77ee240dbca036fa8698ce654e5d1 from qemu
This commit is contained in:
Peter Maydell 2018-02-26 08:08:03 -05:00 committed by Lioncash
parent c69ae10ca7
commit f2dcb81b27
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7
2 changed files with 29 additions and 9 deletions

View file

@ -479,6 +479,13 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
/* Exception-return CPSR write: updates mode/state bits, including
 * env->thumb, via the ExceptionReturn write type.
 */
cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
/* Generated code has already stored the new PC value, but
* without masking out its low bits, because which bits need
* masking depends on whether we're returning to Thumb or ARM
* state. Do the masking now.
*/
/* Thumb state requires halfword alignment (clear bit 0); ARM state
 * requires word alignment (clear bits 1:0). env->thumb is valid here
 * because cpsr_write() above has already installed the new state.
 */
env->regs[15] &= (env->thumb ? ~1 : ~3);
arm_call_el_change_hook(arm_env_get_cpu(env));
}

View file

@ -4488,28 +4488,39 @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
/* Tail of gen_mrs_banked() (its start is outside this view): end the TB
 * so the banked-register access takes effect before further translation.
 */
s->is_jmp = DISAS_UPDATE;
}
/* NOTE(review): this diff rendering interleaves removed and added lines
 * without +/- markers. The header and body lines of the OLD
 * gen_exception_return() (which used store_reg() and so masked bit 1 of
 * the PC before the new Thumb bit was known) appear mixed into the NEW
 * store_pc_exc_ret() below — verify against upstream qemu commit
 * fb0e8e79a9d77ee240dbca036fa8698ce654e5d1 before treating this as code.
 */
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
/* Store value to PC as for an exception return (ie don't
* mask bits). The subsequent call to gen_helper_cpsr_write_eret()
* will do the masking based on the new value of the Thumb bit.
*/
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
TCGv_i32 tmp;
store_reg(s, 15, pc);
tmp = load_cpu_field(s->uc, spsr);
gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, tmp);
tcg_temp_free_i32(tcg_ctx, tmp);
s->is_jmp = DISAS_JUMP;
/* New code path: write the raw, unmasked PC straight into R15.
 * Masking of the low bits is deferred to helper_cpsr_write_eret(),
 * which knows the new Thumb state.
 */
tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[15], pc);
tcg_temp_free_i32(tcg_ctx, pc);
}
/* Generate a v6 exception return. Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
/* Store the raw PC first; the helper below masks its low bits once the
 * new Thumb bit (from the written CPSR) is known.
 */
store_pc_exc_ret(s, pc);
/* The cpsr_write_eret helper will mask the low bits of PC
* appropriately depending on the new Thumb bit, so it must
* be called after storing the new PC.
*/
gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, cpsr);
tcg_temp_free_i32(tcg_ctx, cpsr);
/* NOTE(review): the store_reg() call below is the REMOVED old line of
 * this function, shown interleaved by the diff rendering — it is not
 * part of the new gen_rfe(). Verify against the upstream commit.
 */
store_reg(s, 15, pc);
s->is_jmp = DISAS_JUMP;
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
/* Old-style (SPSR-based) return is now just an RFE using the current
 * mode's SPSR as the new CPSR value, so both paths share the deferred
 * PC-masking logic.
 */
gen_rfe(s, pc, load_cpu_field(s->uc, spsr));
}
static void gen_nop_hint(DisasContext *s, int val)
{
switch (val) {
@ -9516,6 +9527,8 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) // qq
/* Fragment of the LDM register-load loop inside disas_arm_insn()
 * (function start/end are outside this view): route each loaded value.
 */
} else if (i == rn) {
/* Loading into the base register: presumably deferred until the whole
 * transfer completes — TODO confirm against the surrounding loop.
 */
loaded_var = tmp;
loaded_base = 1;
} else if (rn == 15 && exc_return) {
/* Exception-return LDM (e.g. LDM ^ with PC in the list): store the raw
 * PC; cpsr_write_eret will mask it per the new Thumb bit.
 * NOTE(review): upstream qemu checks `i == 15` here, not `rn == 15` —
 * verify this condition against commit fb0e8e79a9d77ee240dbca036fa8698.
 */
store_pc_exc_ret(s, tmp);
} else {
store_reg_from_load(s, i, tmp);
}